1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include <rte_common.h>
6 #include <rte_mempool.h>
8 #include <rte_ethdev.h>
9 #include <rte_eventdev.h>
10 #include <rte_bus_vdev.h>
12 #include <rte_event_eth_rx_adapter.h>
16 #define MAX_NUM_RX_QUEUE 64
17 #define NB_MBUFS (8192 * num_ports * MAX_NUM_RX_QUEUE)
18 #define MBUF_CACHE_SIZE 512
19 #define MBUF_PRIV_SIZE 0
20 #define TEST_INST_ID 0
22 #define TEST_ETHDEV_ID 0
/*
 * Shared state for the Rx adapter test suite.
 * NOTE(review): interior lines are missing from this view — the adapter
 * capability field referenced elsewhere as default_params.caps and the
 * struct's closing brace are not visible here; confirm against the full file.
 */
24 struct event_eth_rx_adapter_test_params {
25 struct rte_mempool *mp; /* packet mbuf pool shared by all test ports */
26 uint16_t rx_rings, tx_rings; /* queue counts chosen in port_init_common() */
28 int rx_intr_port_inited; /* nonzero once an interrupt-capable port was found */
29 uint16_t rx_intr_port; /* port id used by the Rx-interrupt test cases */
/* Single shared instance used by setup/teardown and every test case. */
32 static struct event_eth_rx_adapter_test_params default_params;
/*
 * Configure and start one ethdev port for the tests: probe device info,
 * size the Rx/Tx queue counts into default_params, set up the queues with
 * the supplied mbuf pool, start the port and enable promiscuous mode.
 * NOTE(review): this dump is missing interior lines (the return type line,
 * the retval error checks after each call, loop closing braces and the
 * final return) — do not treat the visible lines as the complete body.
 */
35 port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
36 struct rte_mempool *mp)
38 const uint16_t rx_ring_size = 512, tx_ring_size = 512;
41 struct rte_eth_dev_info dev_info;
/* Reject port ids that are not attached/valid. */
43 if (!rte_eth_dev_is_valid_port(port))
/* First configure with 0 queues so dev_info reflects this port_conf. */
46 retval = rte_eth_dev_configure(port, 0, 0, port_conf);
48 retval = rte_eth_dev_info_get(port, &dev_info);
/* Clamp the Rx queue count to what the device supports (upper bound
 * presumably MAX_NUM_RX_QUEUE — the second RTE_MIN argument is on a
 * missing line; confirm in the full file). */
52 default_params.rx_rings = RTE_MIN(dev_info.max_rx_queues,
54 default_params.tx_rings = 1;
56 /* Configure the Ethernet device. */
57 retval = rte_eth_dev_configure(port, default_params.rx_rings,
58 default_params.tx_rings, port_conf);
/* One Rx queue per ring, all fed from the shared mbuf pool. */
62 for (q = 0; q < default_params.rx_rings; q++) {
63 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
64 rte_eth_dev_socket_id(port), NULL, mp);
69 /* Allocate and set up 1 TX queue per Ethernet port. */
70 for (q = 0; q < default_params.tx_rings; q++) {
71 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
72 rte_eth_dev_socket_id(port), NULL);
77 /* Start the Ethernet port. */
78 retval = rte_eth_dev_start(port);
82 /* Display the port MAC address. */
83 struct rte_ether_addr addr;
84 rte_eth_macaddr_get(port, &addr);
85 printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
86 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
88 addr.addr_bytes[0], addr.addr_bytes[1],
89 addr.addr_bytes[2], addr.addr_bytes[3],
90 addr.addr_bytes[4], addr.addr_bytes[5]);
92 /* Enable RX in promiscuous mode for the Ethernet device. */
93 retval = rte_eth_promiscuous_enable(port);
/*
 * Port init variant used by the Rx-interrupt tests: same common bring-up,
 * but with a port config that (per the missing lines between 105 and 112,
 * presumably intr_conf.rxq = 1 — confirm in the full file) enables Rx
 * queue interrupts.
 */
101 port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
103 static const struct rte_eth_conf port_conf_default = {
105 .mq_mode = ETH_MQ_RX_NONE,
112 return port_init_common(port, &port_conf_default, mp);
/*
 * Default (polling-mode) port init: single-queue, no RSS/multi-queue,
 * delegating all setup to port_init_common().
 */
116 port_init(uint16_t port, struct rte_mempool *mp)
118 static const struct rte_eth_conf port_conf_default = {
120 .mq_mode = ETH_MQ_RX_NONE,
124 return port_init_common(port, &port_conf_default, mp);
/*
 * Create the shared mbuf pool, then walk every attached ethdev looking for
 * one the adapter must service in software (no INTERNAL_PORT capability) —
 * such a port can exercise the Rx-interrupt path. The first match is
 * recorded in default_params; every other port is stopped.
 * NOTE(review): error-check lines and the pool-size arguments are missing
 * from this dump.
 */
128 init_port_rx_intr(int num_ports)
134 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
138 RTE_MBUF_DEFAULT_BUF_SIZE,
140 if (!default_params.mp)
143 RTE_ETH_FOREACH_DEV(portid) {
144 retval = port_init_rx_intr(portid, default_params.mp);
147 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, portid,
148 &default_params.caps);
/* A port WITHOUT the internal-port cap is driven by the adapter's
 * service core, so it can run in interrupt mode — remember it. */
151 if (!(default_params.caps &
152 RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
153 default_params.rx_intr_port_inited = 1;
154 default_params.rx_intr_port = portid;
/* Not usable for the interrupt tests — stop it again. */
157 rte_eth_dev_stop(portid);
/*
 * Create (or reuse, if a previous suite already created it) the shared
 * "packet_pool" mempool, then run the default port_init() on every
 * attached ethdev. The lookup-before-create makes this idempotent across
 * repeated suite setups within one process.
 */
163 init_ports(int num_ports)
/* Reuse an existing pool from an earlier run rather than failing EEXIST. */
168 struct rte_mempool *ptr = rte_mempool_lookup("packet_pool");
171 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
175 RTE_MBUF_DEFAULT_BUF_SIZE,
178 default_params.mp = ptr;
180 if (!default_params.mp)
183 RTE_ETH_FOREACH_DEV(portid) {
184 retval = port_init(portid, default_params.mp);
/*
 * Suite setup for the polling-mode tests: ensure an event device exists
 * (falling back to the event_skeleton vdev), configure it with maximum
 * capabilities reported by dev_info, init all eth ports, then cache the
 * adapter capabilities for TEST_ETHDEV_ID in default_params.caps.
 */
193 testsuite_setup(void)
197 struct rte_event_dev_info dev_info;
199 count = rte_event_dev_count();
/* No hardware/vdev event device present — create the skeleton PMD. */
201 printf("Failed to find a valid event device,"
202 " testing with event_skeleton device\n")
203 rte_vdev_init("event_skeleton", NULL);
206 struct rte_event_dev_config config = {
207 .nb_event_queues = 1,
/* Size the config from the device's advertised maxima. */
211 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
212 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
213 config.nb_event_port_dequeue_depth =
214 dev_info.max_event_port_dequeue_depth;
215 config.nb_event_port_enqueue_depth =
216 dev_info.max_event_port_enqueue_depth;
217 config.nb_events_limit =
218 dev_info.max_num_events;
219 err = rte_event_dev_configure(TEST_DEV_ID, &config);
220 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
224 * eth devices like octeontx use event device to receive packets
225 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
226 * call init_ports after rte_event_dev_configure
228 err = init_ports(rte_eth_dev_count_total());
229 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
/* Cache adapter caps so individual tests can branch on them. */
231 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
232 &default_params.caps);
233 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n",
/*
 * Suite setup for the Rx-interrupt tests. Mirrors testsuite_setup() but
 * initializes ports via init_port_rx_intr(); if no port supports the
 * interrupt path (rx_intr_port_inited == 0) the suite is skipped (the
 * early-return body is on a missing line — confirm in the full file).
 */
240 testsuite_setup_rx_intr(void)
244 struct rte_event_dev_info dev_info;
246 count = rte_event_dev_count();
/* Fall back to the skeleton event vdev when none is attached. */
248 printf("Failed to find a valid event device,"
249 " testing with event_skeleton device\n")
250 rte_vdev_init("event_skeleton", NULL);
253 struct rte_event_dev_config config = {
254 .nb_event_queues = 1,
/* Size the config from the device's advertised maxima. */
258 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
259 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
260 config.nb_event_port_dequeue_depth =
261 dev_info.max_event_port_dequeue_depth;
262 config.nb_event_port_enqueue_depth =
263 dev_info.max_event_port_enqueue_depth;
264 config.nb_events_limit =
265 dev_info.max_num_events;
267 err = rte_event_dev_configure(TEST_DEV_ID, &config);
268 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
272 * eth devices like octeontx use event device to receive packets
273 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
274 * call init_ports after rte_event_dev_configure
276 err = init_port_rx_intr(rte_eth_dev_count_total());
277 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
/* No interrupt-capable port found: nothing for this suite to test. */
279 if (!default_params.rx_intr_port_inited)
282 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID,
283 default_params.rx_intr_port,
284 &default_params.caps);
285 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);
/*
 * Suite teardown: stop every eth port (the loop body is on a missing
 * line, presumably rte_eth_dev_stop(i)) and release the mbuf pool.
 */
291 testsuite_teardown(void)
294 RTE_ETH_FOREACH_DEV(i)
297 rte_mempool_free(default_params.mp);
/*
 * Teardown for the Rx-interrupt suite: only the single recorded interrupt
 * port was left running, so stop just that one and free the pool. If no
 * such port was initialized there is nothing to clean up.
 */
301 testsuite_teardown_rx_intr(void)
303 if (!default_params.rx_intr_port_inited)
306 rte_eth_dev_stop(default_params.rx_intr_port);
307 rte_mempool_free(default_params.mp);
/* NOTE(review): the function header is on a missing line — this is
 * presumably the adapter_create() per-test setup used by the suite tables
 * below. It builds an event port config sized from the device maxima and
 * creates adapter instance TEST_INST_ID (the conf argument to _create is
 * on missing line 326). */
314 struct rte_event_dev_info dev_info;
315 struct rte_event_port_conf rx_p_conf;
317 memset(&rx_p_conf, 0, sizeof(rx_p_conf));
319 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
320 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Use the device's maxima so the adapter's service port cannot overflow. */
322 rx_p_conf.new_event_threshold = dev_info.max_num_events;
323 rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
324 rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
325 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
327 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* NOTE(review): function header missing — presumably the adapter_free()
 * per-test teardown; destroys the adapter created by adapter_create(). */
335 rte_event_eth_rx_adapter_free(TEST_INST_ID);
/*
 * Exercise create/free error paths: create with a bad argument (-EINVAL,
 * the failing argument is on missing line 350), a valid create (0),
 * duplicate create (-EEXIST), valid free (0), double free (-EINVAL) and
 * free of a never-created instance id (-EINVAL).
 */
339 adapter_create_free(void)
343 struct rte_event_port_conf rx_p_conf = {
346 .new_event_threshold = 1200,
/* Invalid create attempt — expected to fail. */
349 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
351 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* Valid create. */
353 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
355 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Creating the same instance id twice must report -EEXIST. */
357 err = rte_event_eth_rx_adapter_create(TEST_INST_ID,
358 TEST_DEV_ID, &rx_p_conf);
359 TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);
361 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
362 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Double free must fail. */
364 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
365 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
/* Freeing an instance id that was never created must also fail. */
367 err = rte_event_eth_rx_adapter_free(1);
368 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
/*
 * Exercise queue add/del on a valid adapter: invalid eth port id, the
 * MULTI_EVENTQ capability branch (single queue vs. all queues, queue ids
 * are on missing lines), and invalid adapter instance ids. The -1 queue
 * id means "all Rx queues" per the adapter API.
 * NOTE(review): many argument lines are missing from this dump.
 */
374 adapter_queue_add_del(void)
380 struct rte_event_eth_rx_adapter_queue_conf queue_config;
382 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
384 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
387 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
390 queue_config.rx_queue_flags = 0;
/* Only set the flow-id override flag when the PMD supports it. */
391 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
393 queue_config.rx_queue_flags =
394 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
396 queue_config.ev = ev;
397 queue_config.servicing_weight = 1;
/* Out-of-range eth port id must be rejected. */
399 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
400 rte_eth_dev_count_total(),
402 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* Per-queue add/del is only legal with the MULTI_EVENTQ capability. */
404 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
405 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
408 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
410 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
412 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
414 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
418 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
420 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
423 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Without MULTI_EVENTQ, adding a specific queue id must fail ... */
425 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
429 TEST_ASSERT(err == -EINVAL, "Expected EINVAL got %d", err);
/* ... but adding/deleting all queues (-1) must succeed. */
431 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
434 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
436 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
438 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
440 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
442 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
444 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
446 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Operations on a non-existent adapter instance id (1) must fail. */
449 err = rte_event_eth_rx_adapter_queue_add(1, TEST_ETHDEV_ID, -1,
451 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
453 err = rte_event_eth_rx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
454 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/*
 * Stress the adapter with the maximum number of eth ports: stop existing
 * ports, spawn net_null vdevs until RTE_MAX_ETHPORTS is reached, re-init
 * all ports, then add and delete adapter queues for every port.
 * NOTE(review): loop-initializer lines (port_index = 0) and several
 * argument lines are missing from this dump.
 */
460 adapter_multi_eth_add_del(void)
465 uint16_t port_index, drv_id = 0;
466 char driver_name[50];
468 struct rte_event_eth_rx_adapter_queue_conf queue_config;
471 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
474 queue_config.rx_queue_flags = 0;
475 queue_config.ev = ev;
476 queue_config.servicing_weight = 1;
478 /* stop eth devices for existing */
480 for (; port_index < rte_eth_dev_count_total(); port_index += 1)
481 rte_eth_dev_stop(port_index);
483 /* add the max port for rx_adapter */
484 port_index = rte_eth_dev_count_total();
/* Create net_null vdevs until every possible port id is populated. */
485 for (; port_index < RTE_MAX_ETHPORTS; port_index += 1) {
486 snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
488 err = rte_vdev_init(driver_name, NULL);
489 TEST_ASSERT(err == 0, "Failed driver %s got %d",
/* Re-run port init so the new vdevs are configured and started. */
494 err = init_ports(rte_eth_dev_count_total());
495 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
497 /* eth_rx_adapter_queue_add for n ports */
499 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
500 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
503 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
506 /* eth_rx_adapter_queue_del n ports */
508 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
509 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
511 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/*
 * Exercise interrupt-mode queue add/del on the recorded interrupt-capable
 * port. servicing_weight == 0 selects interrupt mode; the test also
 * flips a queue from interrupt to poll mode (weight 1). Skipped (early
 * return on a missing line) when no interrupt-capable port exists.
 * NOTE(review): queue-id argument lines are missing from this dump.
 */
518 adapter_intr_queue_add_del(void)
524 struct rte_event_eth_rx_adapter_queue_conf queue_config;
/* No interrupt-capable port found during setup — nothing to test. */
526 if (!default_params.rx_intr_port_inited)
529 eth_port = default_params.rx_intr_port;
530 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, eth_port, &cap);
531 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
534 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
537 queue_config.rx_queue_flags = 0;
538 queue_config.ev = ev;
540 /* weight = 0 => interrupt mode */
541 queue_config.servicing_weight = 0;
/* Add a single interrupt-mode queue when per-queue ops are supported. */
543 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
545 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
548 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Add all remaining queues in interrupt mode. */
552 queue_config.servicing_weight = 0;
553 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
557 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
559 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
561 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
564 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
567 /* del remaining queues */
568 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
571 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Re-add in interrupt mode, then convert to poll mode below. */
574 queue_config.servicing_weight = 0;
575 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
579 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
581 /* intr -> poll mode queue */
582 queue_config.servicing_weight = 1;
584 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
585 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
589 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
592 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
596 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Final cleanup of the queues added above. */
599 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
602 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/*
 * Exercise start/stop: with a queue attached, with no queues attached,
 * and with an invalid adapter instance id (1). Start/stop must succeed
 * even when the adapter services no queues.
 */
608 adapter_start_stop(void)
614 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
617 struct rte_event_eth_rx_adapter_queue_conf queue_config;
619 queue_config.rx_queue_flags = 0;
/* Only request flow-id override when the PMD advertises support. */
620 if (default_params.caps &
621 RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
623 queue_config.rx_queue_flags =
624 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
627 queue_config.ev = ev;
628 queue_config.servicing_weight = 1;
/* Start/stop with one queue attached. */
630 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
632 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
634 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
635 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
637 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
638 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
640 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
642 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Start/stop must also succeed with no queues attached. */
644 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
645 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
647 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
648 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Invalid adapter instance id must be rejected. */
650 err = rte_event_eth_rx_adapter_start(1);
651 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
653 err = rte_event_eth_rx_adapter_stop(1);
654 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* NOTE(review): function header missing — presumably adapter_stats().
 * Validates stats_get: NULL stats pointer (-EINVAL), valid call (0), and
 * invalid adapter instance id (-EINVAL). */
663 struct rte_event_eth_rx_adapter_stats stats;
665 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, NULL);
666 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
668 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, &stats);
669 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
671 err = rte_event_eth_rx_adapter_stats_get(1, &stats);
672 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/*
 * Polling-mode test suite: each case runs between adapter_create (setup)
 * and adapter_free (teardown), except create/free which tests those paths
 * itself.
 */
677 static struct unit_test_suite event_eth_rx_tests = {
678 .suite_name = "rx event eth adapter test suite",
679 .setup = testsuite_setup,
680 .teardown = testsuite_teardown,
682 TEST_CASE_ST(NULL, NULL, adapter_create_free),
683 TEST_CASE_ST(adapter_create, adapter_free,
684 adapter_queue_add_del),
685 TEST_CASE_ST(adapter_create, adapter_free,
686 adapter_multi_eth_add_del),
687 TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
688 TEST_CASE_ST(adapter_create, adapter_free, adapter_stats),
689 TEST_CASES_END() /**< NULL terminate unit test array */
/*
 * Rx-interrupt test suite: uses the interrupt-specific setup/teardown and
 * runs the single interrupt queue add/del case.
 * NOTE(review): suite_name duplicates the polling suite's name — possibly
 * intentional, but worth confirming against upstream.
 */
693 static struct unit_test_suite event_eth_rx_intr_tests = {
694 .suite_name = "rx event eth adapter test suite",
695 .setup = testsuite_setup_rx_intr,
696 .teardown = testsuite_teardown_rx_intr,
698 TEST_CASE_ST(adapter_create, adapter_free,
699 adapter_intr_queue_add_del),
700 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point for the polling-mode suite (registered below). */
705 test_event_eth_rx_adapter_common(void)
707 return unit_test_suite_runner(&event_eth_rx_tests);
/* Entry point for the Rx-interrupt suite (registered below). */
711 test_event_eth_rx_intr_adapter_common(void)
713 return unit_test_suite_runner(&event_eth_rx_intr_tests);
/* Register both suites with the DPDK test harness command table. */
716 REGISTER_TEST_COMMAND(event_eth_rx_adapter_autotest,
717 test_event_eth_rx_adapter_common);
718 REGISTER_TEST_COMMAND(event_eth_rx_intr_adapter_autotest,
719 test_event_eth_rx_intr_adapter_common);