1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
7 #include <rte_bus_vdev.h>
8 #include <rte_common.h>
9 #include <rte_ethdev.h>
10 #include <rte_eth_ring.h>
11 #include <rte_eventdev.h>
12 #include <rte_event_eth_tx_adapter.h>
14 #include <rte_mempool.h>
15 #include <rte_service.h>
/* Test-wide constants. NOTE(review): this is a numbered listing with
 * lines missing; some macros referenced below (e.g. RING_SIZE, SOCKET0,
 * TEST_DEV_ID) are defined on lines not visible in this excerpt. */
19 #define MAX_NUM_QUEUE RTE_PMD_RING_MAX_RX_RINGS /* queues per ring ethdev */
20 #define TEST_INST_ID 0 /* Tx adapter instance id used by every test */
24 #define ETH_NAME_LEN 32
25 #define NUM_ETH_PAIR 1 /* one pair of looped-back pseudo ethdevs */
26 #define NUM_ETH_DEV (2 * NUM_ETH_PAIR)
28 #define PAIR_PORT_INDEX(p) ((p) + NUM_ETH_PAIR) /* index of p's peer port */
29 #define PORT(p) default_params.port[(p)]
30 #define TEST_ETHDEV_ID PORT(0)
31 #define TEST_ETHDEV_PAIR_ID PORT(PAIR_PORT_INDEX(0))
33 #define EDEV_RETRY 0xffff /* max service-run iterations before giving up */
/* Shared state for all Tx adapter test cases in this file.
 * NOTE(review): the struct's closing brace is on a line not visible in
 * this excerpt. */
35 struct event_eth_tx_adapter_test_params {
36 struct rte_mempool *mp; /* mbuf pool shared by the ring ethdevs */
37 uint16_t rx_rings, tx_rings; /* queue counts, set in port_init_common() */
38 struct rte_ring *r[NUM_ETH_DEV][MAX_NUM_QUEUE]; /* backing rings per dev/queue */
39 int port[NUM_ETH_DEV]; /* ethdev port ids created by init_ports() */
42 static int event_dev_delete; /* nonzero if testsuite_setup() created event_sw0 */
43 static struct event_eth_tx_adapter_test_params default_params;
/* Event device service id; ~0 marks "not set". NOTE(review): later code
 * passes &eid through a (uint32_t *) cast — verify the intended width. */
44 static uint64_t eid = ~0ULL;
/* Configure and start ethdev 'port' with MAX_NUM_QUEUE Rx/Tx queues,
 * using 'port_conf' and the mbuf pool 'mp'; enables promiscuous mode and
 * prints the MAC address. Returns 0 on success (error-return lines are
 * missing from this excerpt). */
48 port_init_common(uint8_t port, const struct rte_eth_conf *port_conf,
49 struct rte_mempool *mp)
51 const uint16_t rx_ring_size = RING_SIZE, tx_ring_size = RING_SIZE;
/* Reject invalid port ids before touching the device. */
55 if (!rte_eth_dev_is_valid_port(port))
58 default_params.rx_rings = MAX_NUM_QUEUE;
59 default_params.tx_rings = MAX_NUM_QUEUE;
61 /* Configure the Ethernet device. */
62 retval = rte_eth_dev_configure(port, default_params.rx_rings,
63 default_params.tx_rings, port_conf);
/* One Rx queue per ring, NUMA-local to the device. */
67 for (q = 0; q < default_params.rx_rings; q++) {
68 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
69 rte_eth_dev_socket_id(port), NULL, mp);
/* One Tx queue per ring with default Tx config. */
74 for (q = 0; q < default_params.tx_rings; q++) {
75 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
76 rte_eth_dev_socket_id(port), NULL);
81 /* Start the Ethernet port. */
82 retval = rte_eth_dev_start(port);
86 /* Display the port MAC address. */
87 struct rte_ether_addr addr;
88 retval = rte_eth_macaddr_get(port, &addr);
91 printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
92 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
94 addr.addr_bytes[0], addr.addr_bytes[1],
95 addr.addr_bytes[2], addr.addr_bytes[3],
96 addr.addr_bytes[4], addr.addr_bytes[5]);
98 /* Enable RX in promiscuous mode for the Ethernet device. */
99 retval = rte_eth_promiscuous_enable(port);
/* Initialise 'port' with an all-defaults rte_eth_conf; thin wrapper
 * around port_init_common(). */
107 port_init(uint8_t port, struct rte_mempool *mp)
109 struct rte_eth_conf conf = { 0 };
110 return port_init_common(port, &conf, mp);
113 #define RING_NAME_LEN 20
114 #define DEV_NAME_LEN 20
/* Create the mbuf pool, the backing rings, and NUM_ETH_PAIR pairs of
 * ring-based pseudo ethdevs wired back-to-back: port i transmits into
 * the rings that its peer PAIR_PORT_INDEX(i) receives from, and
 * vice-versa. Port ids are recorded in default_params.port[].
 * NOTE(review): ring_name is sized with ETH_NAME_LEN although
 * RING_NAME_LEN is defined just above — confirm which was intended.
 * NOTE(review): "R%u%u" is ambiguous for multi-digit i/j (e.g. 1,12 vs
 * 11,2) — harmless at current sizes but worth a separator. */
119 char ring_name[ETH_NAME_LEN];
121 struct rte_ring * const *c1;
122 struct rte_ring * const *c2;
/* Lazily create the shared mbuf pool on first use. */
125 if (!default_params.mp)
126 default_params.mp = rte_pktmbuf_pool_create("mbuf_pool",
128 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
130 if (!default_params.mp)
/* One SP/SC ring per (device, queue) slot. */
133 for (i = 0; i < NUM_ETH_DEV; i++) {
134 for (j = 0; j < MAX_NUM_QUEUE; j++) {
135 snprintf(ring_name, sizeof(ring_name), "R%u%u", i, j);
136 default_params.r[i][j] = rte_ring_create(ring_name,
139 RING_F_SP_ENQ | RING_F_SC_DEQ);
140 TEST_ASSERT((default_params.r[i][j] != NULL),
141 "Failed to allocate ring");
146 * To create two pseudo-Ethernet ports where the traffic is
147 * switched between them, that is, traffic sent to port 1 is
148 * read back from port 2 and vice-versa
150 for (i = 0; i < NUM_ETH_PAIR; i++) {
151 char dev_name[DEV_NAME_LEN];
154 c1 = default_params.r[i];
155 c2 = default_params.r[PAIR_PORT_INDEX(i)];
157 snprintf(dev_name, DEV_NAME_LEN, "%u-%u", i, i + NUM_ETH_PAIR);
158 p = rte_eth_from_rings(dev_name, c1, MAX_NUM_QUEUE,
159 c2, MAX_NUM_QUEUE, SOCKET0);
160 TEST_ASSERT(p >= 0, "Port creation failed %s", dev_name);
161 err = port_init(p, default_params.mp);
162 TEST_ASSERT(err == 0, "Port init failed %s", dev_name);
163 default_params.port[i] = p;
/* Peer device: Rx/Tx ring sets swapped relative to its pair. */
165 snprintf(dev_name, DEV_NAME_LEN, "%u-%u", i + NUM_ETH_PAIR, i);
166 p = rte_eth_from_rings(dev_name, c2, MAX_NUM_QUEUE,
167 c1, MAX_NUM_QUEUE, SOCKET0);
/* Fixed: was 'p > 0', which would spuriously fail for the valid
 * port id 0 and was inconsistent with the 'p >= 0' check above. */
168 TEST_ASSERT(p >= 0, "Port creation failed %s", dev_name);
169 err = port_init(p, default_params.mp);
170 TEST_ASSERT(err == 0, "Port init failed %s", dev_name);
171 default_params.port[PAIR_PORT_INDEX(i)] = p;
/* Tear down everything init_ports() created: stop and uninit each ring
 * ethdev, then free its backing rings. NOTE(review): the enclosing
 * function's signature is on a line not visible in this excerpt —
 * presumably a deinit_ports()-style helper. */
181 char name[ETH_NAME_LEN];
183 for (i = 0; i < RTE_DIM(default_params.port); i++) {
184 rte_eth_dev_stop(default_params.port[i]);
/* vdev uninit needs the device name, not the port id. */
185 rte_eth_dev_get_name_by_port(default_params.port[i], name);
186 rte_vdev_uninit(name);
187 for (j = 0; j < RTE_DIM(default_params.r[i]); j++)
188 rte_ring_free(default_params.r[i][j]);
/* Suite setup: build the ring ethdev pair and, when no event device is
 * present, create a software eventdev (event_sw0); event_dev_delete
 * records that so teardown can remove it. */
193 testsuite_setup(void)
195 const char *vdev_name = "event_sw0";
197 int err = init_ports();
198 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
200 if (rte_event_dev_count() == 0) {
201 printf("Failed to find a valid event device,"
202 " testing with event_sw0 device\n");
203 err = rte_vdev_init(vdev_name, NULL);
204 TEST_ASSERT(err == 0, "vdev %s creation failed %d\n",
/* Remember we own the vdev so teardown can uninit it. */
206 event_dev_delete = 1;
211 #define DEVICE_ID_SIZE 64
/* Suite teardown: release the mbuf pool and remove the software
 * eventdev if testsuite_setup() created it. */
214 testsuite_teardown(void)
217 rte_mempool_free(default_params.mp);
218 default_params.mp = NULL;
219 if (event_dev_delete)
220 rte_vdev_uninit("event_sw0");
/* Per-test setup: configure the event device with one ORDERED queue and
 * one port sized from the device capabilities, link them, then create
 * Tx adapter instance TEST_INST_ID. Returns 0 on success. */
224 tx_adapter_create(void)
227 struct rte_event_dev_info dev_info;
228 struct rte_event_port_conf tx_p_conf;
232 struct rte_event_dev_config config = {
233 .nb_event_queues = 1,
237 struct rte_event_queue_conf wkr_q_conf = {
238 .schedule_type = RTE_SCHED_TYPE_ORDERED,
239 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
240 .nb_atomic_flows = 1024,
241 .nb_atomic_order_sequences = 1024,
244 memset(&tx_p_conf, 0, sizeof(tx_p_conf));
/* Size the device config from the advertised maxima. */
245 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
246 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
247 config.nb_event_port_dequeue_depth =
248 dev_info.max_event_port_dequeue_depth;
249 config.nb_event_port_enqueue_depth =
250 dev_info.max_event_port_enqueue_depth;
251 config.nb_events_limit =
252 dev_info.max_num_events;
254 err = rte_event_dev_configure(TEST_DEV_ID, &config);
255 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
259 err = rte_event_queue_setup(TEST_DEV_ID, 0, &wkr_q_conf);
260 TEST_ASSERT(err == 0, "Event queue setup failed %d\n", err);
262 err = rte_event_port_setup(TEST_DEV_ID, 0, NULL);
263 TEST_ASSERT(err == 0, "Event port setup failed %d\n", err);
265 priority = RTE_EVENT_DEV_PRIORITY_LOWEST;
/* rte_event_port_link() returns the number of queues linked. */
266 err = rte_event_port_link(TEST_DEV_ID, 0, &queue_id, &priority, 1);
267 TEST_ASSERT(err == 1, "Error linking port %s\n",
268 rte_strerror(rte_errno));
269 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
270 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Adapter event-port config derived from the device maxima. */
272 tx_p_conf.new_event_threshold = dev_info.max_num_events;
273 tx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
274 tx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
275 err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
277 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Per-test teardown: destroy the Tx adapter instance. */
283 tx_adapter_free(void)
285 rte_event_eth_tx_adapter_free(TEST_INST_ID);
/* Exercise create/free error paths: invalid config (-EINVAL), valid
 * create (0), duplicate create (-EEXIST), double free and free of an
 * unknown instance id (-EINVAL). */
289 tx_adapter_create_free(void)
292 struct rte_event_dev_info dev_info;
293 struct rte_event_port_conf tx_p_conf;
295 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
296 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
298 tx_p_conf.new_event_threshold = dev_info.max_num_events;
299 tx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
300 tx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
/* First create call expects failure (its conf argument is on a line
 * not visible here — presumably NULL). */
302 err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
304 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
306 err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
308 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Creating the same instance twice must fail. */
310 err = rte_event_eth_tx_adapter_create(TEST_INST_ID,
311 TEST_DEV_ID, &tx_p_conf);
312 TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);
314 err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
315 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Freeing twice, or freeing a never-created id, must fail. */
317 err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
318 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
320 err = rte_event_eth_tx_adapter_free(1);
321 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
/* Exercise queue add/delete: invalid ethdev id (-EINVAL), valid single
 * queue and all-queues (-1) add/delete (0), and operations on a
 * non-existent adapter instance (-EINVAL). */
327 tx_adapter_queue_add_del(void)
332 err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
334 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Out-of-range ethdev id must be rejected. */
337 err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
338 rte_eth_dev_count_total(),
340 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
342 err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
345 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
347 err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
350 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
352 err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
355 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
357 err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
360 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
362 err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
365 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Adapter id 1 was never created: both ops must fail. */
367 err = rte_event_eth_tx_adapter_queue_add(1, TEST_ETHDEV_ID, -1);
368 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
370 err = rte_event_eth_tx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
371 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* Exercise start/stop: with a queue added, with no queues, and on a
 * non-existent adapter instance. */
377 tx_adapter_start_stop(void)
381 err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
383 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
385 err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
386 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
388 err = rte_event_eth_tx_adapter_stop(TEST_INST_ID);
389 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
391 err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
393 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Start/stop must also succeed with zero queues configured. */
395 err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
396 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
398 err = rte_event_eth_tx_adapter_stop(TEST_INST_ID);
399 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* NOTE(review): the assertion for start(1) is on a line not visible in
 * this excerpt; only the stop(1) check is shown. */
401 err = rte_event_eth_tx_adapter_start(1);
403 err = rte_event_eth_tx_adapter_stop(1);
404 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* Send one mbuf 'm' through the adapter: tag it for (port, tx_queue_id),
 * enqueue it as a NEW CPU event on queue 'qid', drive the eventdev and
 * adapter service functions, and verify the same mbuf pointer arrives on
 * the paired ethdev's matching Rx queue. Fails after EDEV_RETRY tries. */
411 tx_adapter_single(uint16_t port, uint16_t tx_queue_id,
412 struct rte_mbuf *m, uint8_t qid,
415 struct rte_event event;
420 event.queue_id = qid;
421 event.op = RTE_EVENT_OP_NEW;
422 event.event_type = RTE_EVENT_TYPE_CPU;
423 event.sched_type = sched_type;
/* Record the destination Tx queue in the mbuf for the adapter. */
427 rte_event_eth_tx_adapter_txq_set(m, tx_queue_id);
/* Busy-retry the enqueue until it is accepted or EDEV_RETRY expires. */
430 while (rte_event_enqueue_burst(TEST_DEV_ID, 0, &event, 1) != 1) {
436 TEST_ASSERT(l < EDEV_RETRY, "Unable to enqueue to eventdev");
/* Pump the scheduler (eid) and adapter (tid) services until the mbuf
 * shows up on the peer port. */
438 while (l++ < EDEV_RETRY) {
441 ret = rte_service_run_iter_on_app_lcore(eid, 0);
442 TEST_ASSERT(ret == 0, "failed to run service %d", ret);
445 ret = rte_service_run_iter_on_app_lcore(tid, 0);
446 TEST_ASSERT(ret == 0, "failed to run service %d", ret);
448 if (rte_eth_rx_burst(TEST_ETHDEV_PAIR_ID, tx_queue_id,
/* The rings carry pointers, so the exact mbuf must come back. */
450 TEST_ASSERT_EQUAL(r, m, "mbuf comparison failed"
451 " expected %p received %p", m, r);
456 TEST_ASSERT(0, "Failed to receive packet");
/* End-to-end datapath test for a service-based adapter: reconfigure the
 * event device with one extra SINGLE_LINK queue for the adapter's event
 * port, relink all ports, start everything, push RING_SIZE mbufs per
 * queue through tx_adapter_single(), then validate and reset the
 * adapter statistics. */
461 tx_adapter_service(void)
463 struct rte_event_eth_tx_adapter_stats stats;
466 uint8_t ev_port, ev_qid;
467 struct rte_mbuf bufs[RING_SIZE];
468 struct rte_mbuf *pbufs[RING_SIZE];
469 struct rte_event_dev_info dev_info;
470 struct rte_event_dev_config dev_conf;
471 struct rte_event_queue_conf qconf;
477 memset(&dev_conf, 0, sizeof(dev_conf));
478 err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
480 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);
/* Service-based path only applies without an internal-port adapter. */
482 internal_port = !!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
486 err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
488 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
490 err = rte_event_eth_tx_adapter_event_port_get(TEST_INST_ID,
492 TEST_ASSERT_SUCCESS(err, "Failed to get event port %d", err);
494 err = rte_event_dev_attr_get(TEST_DEV_ID, RTE_EVENT_DEV_ATTR_PORT_COUNT,
496 TEST_ASSERT_SUCCESS(err, "Port count get failed");
498 err = rte_event_dev_attr_get(TEST_DEV_ID,
499 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &qcnt);
500 TEST_ASSERT_SUCCESS(err, "Queue count get failed");
502 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
503 TEST_ASSERT_SUCCESS(err, "Dev info failed");
/* Reconfigure with one additional queue for the adapter's port. */
505 dev_conf.nb_event_queue_flows = dev_info.max_event_queue_flows;
506 dev_conf.nb_event_port_dequeue_depth =
507 dev_info.max_event_port_dequeue_depth;
508 dev_conf.nb_event_port_enqueue_depth =
509 dev_info.max_event_port_enqueue_depth;
510 dev_conf.nb_events_limit =
511 dev_info.max_num_events;
512 dev_conf.nb_event_queues = qcnt + 1;
513 dev_conf.nb_event_ports = pcnt;
514 err = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
515 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
/* The new queue feeds only the adapter port: SINGLE_LINK. */
519 qconf.nb_atomic_flows = dev_info.max_event_queue_flows;
520 qconf.nb_atomic_order_sequences = 32;
521 qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
522 qconf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
523 qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
524 err = rte_event_queue_setup(TEST_DEV_ID, ev_qid, &qconf);
525 TEST_ASSERT_SUCCESS(err, "Failed to setup queue %u", ev_qid);
528 * Setup ports again so that the newly added queue is visible
/* Re-setup and relink every pre-existing port after reconfigure. */
531 for (i = 0; i < pcnt; i++) {
534 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
535 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
540 n_links = rte_event_port_links_get(TEST_DEV_ID, i, queues,
542 TEST_ASSERT(n_links > 0, "Failed to get port links %d\n",
544 err = rte_event_port_setup(TEST_DEV_ID, i, NULL);
545 TEST_ASSERT(err == 0, "Failed to setup port err %d\n", err);
546 err = rte_event_port_link(TEST_DEV_ID, i, queues, priorities,
548 TEST_ASSERT(n_links == err, "Failed to link all queues"
549 " err %s\n", rte_strerror(rte_errno));
/* Link the adapter's event port to its dedicated queue. */
552 err = rte_event_port_link(TEST_DEV_ID, ev_port, &ev_qid, NULL, 1);
553 TEST_ASSERT(err == 1, "Failed to link queue port %u",
556 err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
557 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Non-distributed schedulers need their service run manually. */
559 if (!(dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
/* NOTE(review): device id is hard-coded as 0 here while the rest of
 * this function uses TEST_DEV_ID — confirm they are the same device.
 * NOTE(review): casting &eid (uint64_t) to (uint32_t *) is an
 * aliasing/endianness hazard; consider a uint32_t temporary. */
560 err = rte_event_dev_service_id_get(0, (uint32_t *)&eid);
561 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
563 err = rte_service_runstate_set(eid, 1);
564 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Allow running the service from the app lcore without a mapping. */
566 err = rte_service_set_runstate_mapped_check(eid, 0);
567 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
570 err = rte_event_eth_tx_adapter_service_id_get(TEST_INST_ID, &tid);
571 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
573 err = rte_service_runstate_set(tid, 1);
574 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
576 err = rte_service_set_runstate_mapped_check(tid, 0);
577 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
579 err = rte_event_dev_start(TEST_DEV_ID);
580 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Push RING_SIZE mbufs through every queue and verify each pointer
 * round-trips unchanged. */
582 for (q = 0; q < MAX_NUM_QUEUE; q++) {
583 for (i = 0; i < RING_SIZE; i++)
585 for (i = 0; i < RING_SIZE; i++) {
587 err = tx_adapter_single(TEST_ETHDEV_ID, q, pbufs[i],
589 RTE_SCHED_TYPE_ORDERED);
590 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
592 for (i = 0; i < RING_SIZE; i++) {
593 TEST_ASSERT_EQUAL(pbufs[i], &bufs[i],
594 "Error: received data does not match"
595 " that transmitted");
/* Stats API error path, then value/reset checks. */
599 err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, NULL);
600 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
602 err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, &stats);
603 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
604 TEST_ASSERT_EQUAL(stats.tx_packets, MAX_NUM_QUEUE * RING_SIZE,
605 "stats.tx_packets expected %u got %"PRIu64,
606 MAX_NUM_QUEUE * RING_SIZE,
609 err = rte_event_eth_tx_adapter_stats_reset(TEST_INST_ID);
610 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
612 err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, &stats);
613 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
614 TEST_ASSERT_EQUAL(stats.tx_packets, 0,
615 "stats.tx_packets expected %u got %"PRIu64,
619 err = rte_event_eth_tx_adapter_stats_get(1, &stats);
620 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
622 err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
624 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
626 err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
627 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
629 rte_event_dev_stop(TEST_DEV_ID);
/* Verify the adapter copes with ethdevs hot-plugged after adapter
 * creation: create two null PMDs, add their queues to the adapter, then
 * delete the queues and uninit the vdevs. */
635 tx_adapter_dynamic_device(void)
637 uint16_t port_id = rte_eth_dev_count_avail();
638 const char *null_dev[2] = { "eth_null0", "eth_null1" };
639 struct rte_eth_conf dev_conf;
643 memset(&dev_conf, 0, sizeof(dev_conf));
644 for (i = 0; i < RTE_DIM(null_dev); i++) {
645 ret = rte_vdev_init(null_dev[i], NULL);
646 TEST_ASSERT_SUCCESS(ret, "%s Port creation failed %d",
/* Adapter is created before the new devices are configured. */
650 ret = tx_adapter_create();
651 TEST_ASSERT_SUCCESS(ret, "Adapter create failed %d",
/* New devices get port ids starting at the pre-plug count. */
655 ret = rte_eth_dev_configure(port_id + i, MAX_NUM_QUEUE,
656 MAX_NUM_QUEUE, &dev_conf);
657 TEST_ASSERT_SUCCESS(ret, "Failed to configure device %d", ret);
659 ret = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
661 TEST_ASSERT_SUCCESS(ret, "Failed to add queues %d", ret);
665 for (i = 0; i < RTE_DIM(null_dev); i++) {
666 ret = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
668 TEST_ASSERT_SUCCESS(ret, "Failed to delete queues %d", ret);
/* Clean up the temporary null devices. */
673 for (i = 0; i < RTE_DIM(null_dev); i++)
674 rte_vdev_uninit(null_dev[i]);
/* Test-suite table: tx_adapter_create/tx_adapter_free bracket each case
 * that needs a live adapter instance. */
679 static struct unit_test_suite event_eth_tx_tests = {
680 .setup = testsuite_setup,
681 .teardown = testsuite_teardown,
682 .suite_name = "tx event eth adapter test suite",
684 TEST_CASE_ST(NULL, NULL, tx_adapter_create_free),
685 TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
686 tx_adapter_queue_add_del),
687 TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
688 tx_adapter_start_stop),
689 TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
/* NOTE(review): the test function for this entry (presumably
 * tx_adapter_service) is on a line not visible in this excerpt. */
691 TEST_CASE_ST(NULL, NULL, tx_adapter_dynamic_device),
692 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point: run the suite; registered with the DPDK test framework
 * as 'event_eth_tx_adapter_autotest'. */
697 test_event_eth_tx_adapter_common(void)
699 return unit_test_suite_runner(&event_eth_tx_tests);
702 REGISTER_TEST_COMMAND(event_eth_tx_adapter_autotest,
703 test_event_eth_tx_adapter_common);