1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include <rte_common.h>
6 #include <rte_mempool.h>
8 #include <rte_ethdev.h>
9 #include <rte_eventdev.h>
10 #include <rte_bus_vdev.h>
12 #include <rte_event_eth_rx_adapter.h>
16 #define MAX_NUM_RX_QUEUE 64
17 #define NB_MBUFS (8192 * num_ports * MAX_NUM_RX_QUEUE)
18 #define MBUF_CACHE_SIZE 512
19 #define MBUF_PRIV_SIZE 0
20 #define TEST_INST_ID 0
22 #define TEST_ETHDEV_ID 0
/*
 * Shared state for the Rx adapter test suites; populated by the
 * testsuite_setup*() helpers below and cleared again in teardown.
 */
24 struct event_eth_rx_adapter_test_params {
/* mbuf pool ("packet_pool") used by every test port */
25 struct rte_mempool *mp;
/* number of Rx/Tx queues configured per port in port_init_common() */
26 uint16_t rx_rings, tx_rings;
/* non-zero once an interrupt-capable port was found and recorded */
28 int rx_intr_port_inited;
/* port id used by the Rx-interrupt test suite */
29 uint16_t rx_intr_port;
32 static struct event_eth_rx_adapter_test_params default_params;
/* Track which vdevs setup created, so teardown removes only its own. */
33 static bool event_dev_created;
34 static bool eth_dev_created;
/*
 * Bring up one ethdev for the tests: validate the port id, configure as
 * many Rx queues as the test limit/device allows plus one Tx queue, start
 * the port, print its MAC and enable promiscuous Rx.
 * NOTE(review): this extract is missing interior lines (error checks and
 * returns between the calls) — confirm against the full file.
 */
37 port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
38 struct rte_mempool *mp)
40 const uint16_t rx_ring_size = 512, tx_ring_size = 512;
43 struct rte_eth_dev_info dev_info;
45 if (!rte_eth_dev_is_valid_port(port))
/* Minimal 0-queue configure first, presumably so the dev_info query
 * below reflects this configuration — TODO confirm. */
48 retval = rte_eth_dev_configure(port, 0, 0, port_conf);
50 retval = rte_eth_dev_info_get(port, &dev_info);
/* Cap the Rx queue count at what the device supports. */
54 default_params.rx_rings = RTE_MIN(dev_info.max_rx_queues,
56 default_params.tx_rings = 1;
58 /* Configure the Ethernet device. */
59 retval = rte_eth_dev_configure(port, default_params.rx_rings,
60 default_params.tx_rings, port_conf);
/* Allocate and set up the Rx queues, all backed by the shared pool. */
64 for (q = 0; q < default_params.rx_rings; q++) {
65 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
66 rte_eth_dev_socket_id(port), NULL, mp);
71 /* Allocate and set up 1 TX queue per Ethernet port. */
72 for (q = 0; q < default_params.tx_rings; q++) {
73 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
74 rte_eth_dev_socket_id(port), NULL);
79 /* Start the Ethernet port. */
80 retval = rte_eth_dev_start(port);
84 /* Display the port MAC address. */
85 struct rte_ether_addr addr;
86 retval = rte_eth_macaddr_get(port, &addr);
89 printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
90 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
91 (unsigned int)port, RTE_ETHER_ADDR_BYTES(&addr));
93 /* Enable RX in promiscuous mode for the Ethernet device. */
94 retval = rte_eth_promiscuous_enable(port);
/*
 * Initialize a port for the Rx-interrupt suite via port_init_common().
 * The Rx-interrupt-specific fields of the port config (e.g. rxq intr
 * enable) are on lines not visible in this extract — TODO confirm.
 */
102 port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
104 static const struct rte_eth_conf port_conf_default = {
106 .mq_mode = RTE_ETH_MQ_RX_NONE,
113 return port_init_common(port, &port_conf_default, mp);
/*
 * Initialize a port for the poll-mode suite with a default config.
 * NOTE(review): visibly identical to port_init_rx_intr() in this extract;
 * the two presumably differ only in config fields on missing lines.
 */
117 port_init(uint16_t port, struct rte_mempool *mp)
119 static const struct rte_eth_conf port_conf_default = {
121 .mq_mode = RTE_ETH_MQ_RX_NONE,
125 return port_init_common(port, &port_conf_default, mp);
/*
 * Create the shared mbuf pool and initialize every ethdev in
 * Rx-interrupt mode. The first port WITHOUT the INTERNAL_PORT adapter
 * capability is recorded as the port the interrupt tests will use;
 * other ports are stopped (exact flow obscured by missing lines).
 */
129 init_port_rx_intr(int num_ports)
135 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
139 RTE_MBUF_DEFAULT_BUF_SIZE,
141 if (!default_params.mp)
144 RTE_ETH_FOREACH_DEV(portid) {
145 retval = port_init_rx_intr(portid, default_params.mp);
148 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, portid,
149 &default_params.caps);
/* A port without an internal event port needs the service-core /
 * interrupt path, which is what this suite exercises. */
152 if (!(default_params.caps &
153 RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
154 default_params.rx_intr_port_inited = 1;
155 default_params.rx_intr_port = portid;
158 retval = rte_eth_dev_stop(portid);
159 TEST_ASSERT(retval == 0, "Failed to stop port %u: %d\n",
/*
 * Initialize every ethdev in poll mode, creating the "packet_pool"
 * mempool only if a previous setup has not already created it (the
 * lookup avoids a duplicate-name create failure on re-entry, e.g. when
 * adapter_multi_eth_add_del() calls this again).
 */
166 init_ports(int num_ports)
171 struct rte_mempool *ptr = rte_mempool_lookup("packet_pool");
174 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
178 RTE_MBUF_DEFAULT_BUF_SIZE,
/* Reuse the existing pool when it was already created. */
181 default_params.mp = ptr;
183 if (!default_params.mp)
186 RTE_ETH_FOREACH_DEV(portid) {
187 retval = port_init(portid, default_params.mp);
/*
 * Suite setup for the poll-mode Rx adapter tests: ensure an event device
 * and an ethdev exist (falling back to the event_skeleton / net_null
 * vdevs), configure the event device to its advertised maximums, then
 * initialize the ports and cache the adapter capabilities.
 */
196 testsuite_setup(void)
200 struct rte_event_dev_info dev_info;
202 count = rte_event_dev_count();
/* No event device present: create the skeleton vdev instead. */
204 printf("Failed to find a valid event device,"
205 " testing with event_skeleton device\n");
206 err = rte_vdev_init("event_skeleton", NULL);
207 TEST_ASSERT(err == 0, "Failed to create event_skeleton. err=%d",
/* Remember we created it so teardown can uninit it. */
209 event_dev_created = true;
212 struct rte_event_dev_config config = {
213 .nb_event_queues = 1,
/* Size the config from the device's advertised maximums. */
217 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
218 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
219 config.nb_event_port_dequeue_depth =
220 dev_info.max_event_port_dequeue_depth;
221 config.nb_event_port_enqueue_depth =
222 dev_info.max_event_port_enqueue_depth;
223 config.nb_events_limit =
224 dev_info.max_num_events;
225 err = rte_event_dev_configure(TEST_DEV_ID, &config);
226 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
/* Likewise ensure at least one ethdev exists (net_null fallback). */
229 count = rte_eth_dev_count_total();
231 printf("Testing with net_null device\n");
232 err = rte_vdev_init("net_null", NULL);
233 TEST_ASSERT(err == 0, "Failed to create net_null. err=%d",
235 eth_dev_created = true;
239 * eth devices like octeontx use event device to receive packets
240 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
241 * call init_ports after rte_event_dev_configure
243 err = init_ports(rte_eth_dev_count_total());
244 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
246 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
247 &default_params.caps);
248 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n",
/*
 * Suite setup for the Rx-interrupt tests. NOTE(review): this is a
 * near-verbatim copy of testsuite_setup() except for the
 * init_port_rx_intr() call and the rx_intr_port capability query —
 * a candidate for factoring into a shared helper.
 */
255 testsuite_setup_rx_intr(void)
259 struct rte_event_dev_info dev_info;
261 count = rte_event_dev_count();
/* No event device present: create the skeleton vdev instead. */
263 printf("Failed to find a valid event device,"
264 " testing with event_skeleton device\n");
265 err = rte_vdev_init("event_skeleton", NULL);
266 TEST_ASSERT(err == 0, "Failed to create event_skeleton. err=%d",
268 event_dev_created = true;
271 struct rte_event_dev_config config = {
272 .nb_event_queues = 1,
/* Size the config from the device's advertised maximums. */
276 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
277 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
278 config.nb_event_port_dequeue_depth =
279 dev_info.max_event_port_dequeue_depth;
280 config.nb_event_port_enqueue_depth =
281 dev_info.max_event_port_enqueue_depth;
282 config.nb_events_limit =
283 dev_info.max_num_events;
285 err = rte_event_dev_configure(TEST_DEV_ID, &config);
286 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
/* Ensure at least one ethdev exists (net_null fallback). */
289 count = rte_eth_dev_count_total();
291 printf("Testing with net_null device\n");
292 err = rte_vdev_init("net_null", NULL);
293 TEST_ASSERT(err == 0, "Failed to create net_null. err=%d",
295 eth_dev_created = true;
299 * eth devices like octeontx use event device to receive packets
300 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
301 * call init_ports after rte_event_dev_configure
303 err = init_port_rx_intr(rte_eth_dev_count_total());
304 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
/* No interrupt-capable port found: nothing for this suite to test. */
306 if (!default_params.rx_intr_port_inited)
309 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID,
310 default_params.rx_intr_port,
311 &default_params.caps);
312 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);
/*
 * Poll-mode suite teardown: stop the ports, remove any vdevs this setup
 * created, free the mbuf pool and reset the shared test state.
 */
318 testsuite_teardown(void)
322 RTE_ETH_FOREACH_DEV(i)
/* Only uninit net_null if testsuite_setup() created it. */
325 if (eth_dev_created) {
326 err = rte_vdev_uninit("net_null");
328 printf("Failed to delete net_null. err=%d", err);
329 eth_dev_created = false;
332 rte_mempool_free(default_params.mp);
333 if (event_dev_created) {
334 err = rte_vdev_uninit("event_skeleton");
336 printf("Failed to delete event_skeleton. err=%d", err);
337 event_dev_created = false;
/* Leave state clean for a subsequent suite run. */
340 memset(&default_params, 0, sizeof(default_params));
/*
 * Rx-interrupt suite teardown: stop the interrupt test port, remove any
 * vdevs this setup created, free the pool and reset shared state.
 */
344 testsuite_teardown_rx_intr(void)
/* Nothing was set up if no interrupt-capable port was found. */
347 if (!default_params.rx_intr_port_inited)
/* NOTE(review): rte_eth_dev_stop() return value is ignored here,
 * unlike in testsuite_teardown()/init_port_rx_intr(). */
350 rte_eth_dev_stop(default_params.rx_intr_port);
351 if (eth_dev_created) {
352 err = rte_vdev_uninit("net_null");
354 printf("Failed to delete net_null. err=%d", err);
355 eth_dev_created = false;
357 rte_mempool_free(default_params.mp);
358 if (event_dev_created) {
359 err = rte_vdev_uninit("event_skeleton");
361 printf("Failed to delete event_skeleton. err=%d", err);
362 event_dev_created = false;
365 memset(&default_params, 0, sizeof(default_params));
/*
 * NOTE(review): the function signature is on a line missing from this
 * extract; from the suite table below this is presumably the
 * adapter_create() per-test setup. It builds an event port config from
 * the device maximums and creates Rx adapter instance TEST_INST_ID.
 */
372 struct rte_event_dev_info dev_info;
373 struct rte_event_port_conf rx_p_conf;
375 memset(&rx_p_conf, 0, sizeof(rx_p_conf));
377 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
378 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
380 rx_p_conf.new_event_threshold = dev_info.max_num_events;
381 rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
382 rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
383 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
385 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/*
 * Setup variant that uses rte_event_eth_rx_adapter_create_with_params():
 * verifies that a per-queue event buffer with size 0 is rejected
 * (-EINVAL), that a valid create succeeds, and that a second create of
 * the same instance id returns -EEXIST.
 */
391 adapter_create_with_params(void)
394 struct rte_event_dev_info dev_info;
395 struct rte_event_port_conf rx_p_conf;
396 struct rte_event_eth_rx_adapter_params rxa_params;
398 memset(&rx_p_conf, 0, sizeof(rx_p_conf));
400 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
401 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
403 rx_p_conf.new_event_threshold = dev_info.max_num_events;
404 rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
405 rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
/* use_queue_event_buf=false with event_buf_size=0 must be invalid. */
407 rxa_params.use_queue_event_buf = false;
408 rxa_params.event_buf_size = 0;
410 err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID,
411 TEST_DEV_ID, &rx_p_conf, &rxa_params);
412 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* With per-queue buffers the adapter-level size may be 0: must pass. */
414 rxa_params.use_queue_event_buf = true;
416 err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID,
417 TEST_DEV_ID, &rx_p_conf, &rxa_params);
418 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Re-creating the same instance id must fail with -EEXIST. */
420 err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID,
421 TEST_DEV_ID, &rx_p_conf, &rxa_params);
422 TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err);
/*
 * With an adapter created in per-queue event buffer mode, verify that
 * queue_add rejects event_buf_size == 0 (-EINVAL), accepts a non-zero
 * size, and that the queue can then be deleted.
 */
428 adapter_queue_event_buf_test(void)
434 struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
436 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
438 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
441 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
444 queue_config.rx_queue_flags = 0;
/* Only set a flow id when the PMD can override it per queue. */
445 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
447 queue_config.rx_queue_flags =
448 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
450 queue_config.ev = ev;
451 queue_config.servicing_weight = 1;
/* Zero-sized per-queue event buffer must be rejected. */
452 queue_config.event_buf_size = 0;
454 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
457 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
459 queue_config.event_buf_size = 1024;
461 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
464 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
466 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
469 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/*
 * Per-queue statistics API test: stats get/reset must fail with -EINVAL
 * before the queue is added to the adapter, and succeed once it is.
 */
475 adapter_queue_stats_test(void)
480 struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
481 struct rte_event_eth_rx_adapter_queue_stats q_stats;
/* Queue not yet added: both calls must reject it. */
483 err = rte_event_eth_rx_adapter_queue_stats_get(TEST_INST_ID,
486 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
488 err = rte_event_eth_rx_adapter_queue_stats_reset(TEST_INST_ID,
490 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
492 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
494 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
497 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
500 queue_config.rx_queue_flags = 0;
501 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
503 queue_config.rx_queue_flags =
504 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
506 queue_config.ev = ev;
507 queue_config.servicing_weight = 1;
508 queue_config.event_buf_size = 1024;
510 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
513 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Queue added: stats get/reset must now succeed. */
515 err = rte_event_eth_rx_adapter_queue_stats_get(TEST_INST_ID,
518 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
520 err = rte_event_eth_rx_adapter_queue_stats_reset(TEST_INST_ID,
522 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
524 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
527 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* NOTE(review): signature is on a missing line; per the suite table this
 * is the adapter_free() per-test teardown releasing instance TEST_INST_ID. */
535 rte_event_eth_rx_adapter_free(TEST_INST_ID);
/*
 * Exercise create/free error paths: invalid args -> -EINVAL, duplicate
 * create -> -EEXIST, double free / free of a non-existent instance id
 * -> -EINVAL.
 */
539 adapter_create_free(void)
543 struct rte_event_port_conf rx_p_conf = {
546 .new_event_threshold = 1200,
/* First create call (invalid argument variant) must fail. */
549 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
551 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
553 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
555 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Creating the same instance id again must return -EEXIST. */
557 err = rte_event_eth_rx_adapter_create(TEST_INST_ID,
558 TEST_DEV_ID, &rx_p_conf);
559 TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);
561 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
562 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Double free and free of an id that was never created both fail. */
564 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
565 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
567 err = rte_event_eth_rx_adapter_free(1);
568 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
/*
 * Same create/free error-path coverage as adapter_create_free(), but for
 * the _with_params variant: NULL configs, duplicate create, zero buffer
 * size, double free and bogus instance id.
 */
574 adapter_create_free_with_params(void)
578 struct rte_event_port_conf rx_p_conf = {
581 .new_event_threshold = 1200,
584 struct rte_event_eth_rx_adapter_params rxa_params = {
585 .event_buf_size = 1024
/* NULL port conf / params must be rejected. */
588 err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID,
589 TEST_DEV_ID, NULL, NULL);
590 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
592 err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID,
593 TEST_DEV_ID, &rx_p_conf, &rxa_params);
594 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
596 err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID,
597 TEST_DEV_ID, &rx_p_conf, &rxa_params);
598 TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);
/* Zero adapter event buffer size is invalid in shared-buffer mode. */
600 rxa_params.event_buf_size = 0;
601 err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID,
602 TEST_DEV_ID, &rx_p_conf, &rxa_params);
603 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
605 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
606 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
608 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
609 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
611 err = rte_event_eth_rx_adapter_free(1);
612 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
/*
 * Queue add/delete coverage: invalid port id, the MULTI_EVENTQ capability
 * path vs. the single-queue (-1 only) path, and invalid adapter ids.
 */
618 adapter_queue_add_del(void)
624 struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
626 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
628 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
631 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
634 queue_config.rx_queue_flags = 0;
635 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
637 queue_config.rx_queue_flags =
638 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
640 queue_config.ev = ev;
641 queue_config.servicing_weight = 1;
/* An out-of-range eth port id must be rejected. */
643 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
644 rte_eth_dev_count_total(),
646 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* With MULTI_EVENTQ, individual queue ids and -1 (all queues) work. */
648 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
649 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
652 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
654 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
656 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
658 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
662 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
664 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
667 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Without MULTI_EVENTQ, a specific queue id is invalid; only -1
 * (all queues) is accepted. */
669 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
673 TEST_ASSERT(err == -EINVAL, "Expected EINVAL got %d", err);
675 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
678 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
680 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
682 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
684 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
686 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
688 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
690 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Adapter id 1 was never created: add/del must fail. */
693 err = rte_event_eth_rx_adapter_queue_add(1, TEST_ETHDEV_ID, -1,
695 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
697 err = rte_event_eth_rx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
698 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/*
 * Scale test: create net_null vdevs up to RTE_MAX_ETHPORTS, add a queue
 * from every port to the adapter, delete them all, then uninit the extra
 * vdevs. Existing ports are stopped first so init_ports() can reconfigure
 * them.
 */
704 adapter_multi_eth_add_del(void)
709 uint16_t port_index, port_index_base, drv_id = 0;
710 char driver_name[50];
712 struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
715 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
718 queue_config.rx_queue_flags = 0;
719 queue_config.ev = ev;
720 queue_config.servicing_weight = 1;
722 /* stop eth devices for existing */
724 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
725 err = rte_eth_dev_stop(port_index);
726 TEST_ASSERT(err == 0, "Failed to stop port %u: %d\n",
730 /* add the max port for rx_adapter */
731 port_index = rte_eth_dev_count_total();
732 port_index_base = port_index;
/* Instantiate "net_null<N>" vdevs to fill the port table. */
733 for (; port_index < RTE_MAX_ETHPORTS; port_index += 1) {
734 snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
736 err = rte_vdev_init(driver_name, NULL);
737 TEST_ASSERT(err == 0, "Failed driver %s got %d",
742 err = init_ports(rte_eth_dev_count_total());
743 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
745 /* eth_rx_adapter_queue_add for n ports */
747 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
748 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
751 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
754 /* eth_rx_adapter_queue_del n ports */
756 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
757 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
759 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
762 /* delete vdev ports */
763 for (drv_id = 0, port_index = port_index_base;
764 port_index < RTE_MAX_ETHPORTS;
765 drv_id += 1, port_index += 1) {
766 snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
768 err = rte_vdev_uninit(driver_name);
769 TEST_ASSERT(err == 0, "Failed driver %s got %d",
/*
 * Interrupt-mode queue add/delete: servicing_weight == 0 selects
 * interrupt mode for a queue. Covers single queue, all queues (-1),
 * and switching a queue from interrupt to poll mode (weight 1).
 * Skipped when no interrupt-capable port was found in setup.
 */
777 adapter_intr_queue_add_del(void)
783 struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
785 if (!default_params.rx_intr_port_inited)
788 eth_port = default_params.rx_intr_port;
789 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, eth_port, &cap);
790 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
793 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
796 queue_config.rx_queue_flags = 0;
797 queue_config.ev = ev;
799 /* weight = 0 => interrupt mode */
800 queue_config.servicing_weight = 0;
/* Add a single interrupt-mode queue when the PMD supports per-queue
 * event queues. */
802 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
804 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
807 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Add all queues (-1) in interrupt mode. */
811 queue_config.servicing_weight = 0;
812 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
816 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
818 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
820 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
823 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
826 /* del remaining queues */
827 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
830 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Re-add in interrupt mode, then convert to poll mode below. */
833 queue_config.servicing_weight = 0;
834 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
838 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
840 /* intr -> poll mode queue */
841 queue_config.servicing_weight = 1;
843 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
844 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
848 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
851 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
855 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
858 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
861 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/*
 * Start/stop coverage: the adapter must start and stop both with and
 * without a queue attached; start/stop on a never-created adapter id
 * must fail with -EINVAL.
 */
867 adapter_start_stop(void)
873 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
876 struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
878 queue_config.rx_queue_flags = 0;
879 if (default_params.caps &
880 RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
882 queue_config.rx_queue_flags =
883 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
886 queue_config.ev = ev;
887 queue_config.servicing_weight = 1;
889 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
891 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Start/stop with a queue attached. */
893 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
894 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
896 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
897 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
899 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
901 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Start/stop with no queues attached must also succeed. */
903 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
904 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
906 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
907 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Adapter id 1 was never created. */
909 err = rte_event_eth_rx_adapter_start(1);
910 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
912 err = rte_event_eth_rx_adapter_stop(1);
913 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/*
 * NOTE(review): the function signature is on a missing line; per the
 * suite table this is adapter_stats(). It checks stats_get with a NULL
 * output pointer, a valid call, and a never-created adapter id.
 */
922 struct rte_event_eth_rx_adapter_stats stats;
924 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, NULL);
925 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
927 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, &stats);
928 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
930 err = rte_event_eth_rx_adapter_stats_get(1, &stats);
931 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/*
 * queue_conf_get coverage: no queue added (-EINVAL), queue added (0),
 * invalid Rx queue id (-EINVAL), NULL conf pointer (-EINVAL).
 */
937 adapter_queue_conf(void)
940 struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
942 /* Case 1: queue conf get without any queues in Rx adapter */
943 err = rte_event_eth_rx_adapter_queue_conf_get(TEST_INST_ID,
946 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
948 /* Add queue to Rx adapter */
949 queue_conf.ev.queue_id = 0;
950 queue_conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
951 queue_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
953 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
956 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
958 /* Case 2: queue conf get with queue added to Rx adapter */
959 err = rte_event_eth_rx_adapter_queue_conf_get(TEST_INST_ID,
962 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
964 /* Case 3: queue conf get with invalid rx queue id */
965 err = rte_event_eth_rx_adapter_queue_conf_get(TEST_INST_ID,
968 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
970 /* Case 4: queue conf get with NULL queue conf struct */
971 err = rte_event_eth_rx_adapter_queue_conf_get(TEST_INST_ID,
974 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
976 /* Delete queue from the Rx adapter */
977 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
980 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/*
 * Poll-mode Rx adapter test suite. Per-case setup/teardown create and
 * free the adapter instance; the two create_free cases manage their own.
 */
985 static struct unit_test_suite event_eth_rx_tests = {
986 .suite_name = "rx event eth adapter test suite",
987 .setup = testsuite_setup,
988 .teardown = testsuite_teardown,
990 TEST_CASE_ST(NULL, NULL, adapter_create_free),
991 TEST_CASE_ST(NULL, NULL, adapter_create_free_with_params),
992 TEST_CASE_ST(adapter_create, adapter_free,
993 adapter_queue_add_del),
994 TEST_CASE_ST(adapter_create, adapter_free,
995 adapter_multi_eth_add_del),
996 TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
997 TEST_CASE_ST(adapter_create, adapter_free, adapter_stats),
998 TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf),
/* These two need the per-queue event buffer adapter variant. */
999 TEST_CASE_ST(adapter_create_with_params, adapter_free,
1000 adapter_queue_event_buf_test),
1001 TEST_CASE_ST(adapter_create_with_params, adapter_free,
1002 adapter_queue_stats_test),
1003 TEST_CASES_END() /**< NULL terminate unit test array */
/*
 * Rx-interrupt test suite.
 * NOTE(review): suite_name is identical to the poll-mode suite above,
 * which makes the two indistinguishable in test output — consider a
 * distinct name (e.g. "rx event eth adapter interrupt test suite").
 */
1007 static struct unit_test_suite event_eth_rx_intr_tests = {
1008 .suite_name = "rx event eth adapter test suite",
1009 .setup = testsuite_setup_rx_intr,
1010 .teardown = testsuite_teardown_rx_intr,
1011 .unit_test_cases = {
1012 TEST_CASE_ST(adapter_create, adapter_free,
1013 adapter_intr_queue_add_del),
1014 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point for the poll-mode suite (event_eth_rx_adapter_autotest). */
1019 test_event_eth_rx_adapter_common(void)
1021 return unit_test_suite_runner(&event_eth_rx_tests);
/* Entry point for the interrupt suite (event_eth_rx_intr_adapter_autotest). */
1025 test_event_eth_rx_intr_adapter_common(void)
1027 return unit_test_suite_runner(&event_eth_rx_intr_tests);
/* Register both suites with the dpdk-test command framework. */
1030 REGISTER_TEST_COMMAND(event_eth_rx_adapter_autotest,
1031 test_event_eth_rx_adapter_common);
1032 REGISTER_TEST_COMMAND(event_eth_rx_intr_adapter_autotest,
1033 test_event_eth_rx_intr_adapter_common);