1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2020 Marvell International Ltd.
4 #include <rte_bitmap.h>
5 #include <rte_ethdev.h>
6 #include <rte_eventdev.h>
7 #include <rte_event_eth_rx_adapter.h>
8 #include <rte_event_eth_tx_adapter.h>
9 #include <rte_malloc.h>
12 #include "event_helper.h"
14 static volatile bool eth_core_running;
/*
 * Count the lcores enabled in the given eth-core bitmap.
 * NOTE(review): several original lines are elided from this view
 * (return type, counter declaration, return statement).
 */
17 eh_get_enabled_cores(struct rte_bitmap *eth_core_mask)
21 	RTE_LCORE_FOREACH(i) {
22 		/* Check if this core is enabled in core mask*/
23 		if (rte_bitmap_get(eth_core_mask, i)) {
24 			/* Found enabled core */
/*
 * Pick the next lcore reserved as an eth (adapter) core, in round-robin
 * fashion across calls.
 *
 * NOTE(review): prev_core is function-static, so repeated calls advance
 * through the eth cores but the function is not reentrant/thread-safe —
 * confirm it is only invoked from the single-threaded config path.
 */
31 static inline unsigned int
32 eh_get_next_eth_core(struct eventmode_conf *em_conf)
34 	static unsigned int prev_core = -1;
35 	unsigned int next_core;
38 	 * Make sure we have at least one eth core running, else the following
39 	 * logic would lead to an infinite loop.
41 	if (eh_get_enabled_cores(em_conf->eth_core_mask) == 0) {
42 		EH_LOG_ERR("No enabled eth core found");
46 	/* Only some cores are marked as eth cores, skip others */
48 		/* Get the next core */
49 		next_core = rte_get_next_lcore(prev_core, 0, 1);
51 		/* Check if we have reached max lcores */
52 		if (next_core == RTE_MAX_LCORE)
55 		/* Update prev_core */
56 		prev_core = next_core;
57 	} while (!(rte_bitmap_get(em_conf->eth_core_mask, next_core)));
/*
 * Get the next active (worker) lcore after prev_core, skipping any lcore
 * that is reserved as an eth core in em_conf->eth_core_mask. Loops until a
 * non-eth core is found or RTE_MAX_LCORE is reached.
 */
62 static inline unsigned int
63 eh_get_next_active_core(struct eventmode_conf *em_conf, unsigned int prev_core)
65 	unsigned int next_core;
67 	/* Get next active core skipping cores reserved as eth cores */
69 		/* Get the next core */
70 		next_core = rte_get_next_lcore(prev_core, 0, 0);
72 		/* Check if we have reached max lcores */
73 		if (next_core == RTE_MAX_LCORE)
76 		prev_core = next_core;
77 	} while (rte_bitmap_get(em_conf->eth_core_mask, next_core));
/*
 * Linear search for the eventdev config entry matching eventdev_id.
 * Returns a pointer into em_conf->eventdev_config[]; presumably returns
 * NULL when no entry matches (the not-found branch body is elided from
 * this view — TODO confirm).
 */
82 static struct eventdev_params *
83 eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id)
87 	for (i = 0; i < em_conf->nb_eventdev; i++) {
88 		if (em_conf->eventdev_config[i].eventdev_id == eventdev_id)
93 	if (i == em_conf->nb_eventdev)
96 	return &(em_conf->eventdev_config[i]);
/* Query whether the event device advertises RTE_EVENT_DEV_CAP_BURST_MODE. */
99 eh_dev_has_burst_mode(uint8_t dev_id)
101 	struct rte_event_dev_info dev_info;
103 	rte_event_dev_info_get(dev_id, &dev_info);
104 	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
/*
 * Populate a default eventdev configuration when the application supplied
 * none: exactly one event device (id 0) with ALL_TYPES queues, capped at
 * one event queue per eth dev plus one reserved Tx queue, and at most one
 * event port per lcore.
 */
109 eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
111 	int lcore_count, nb_eventdev, nb_eth_dev, ret;
112 	struct eventdev_params *eventdev_config;
113 	struct rte_event_dev_info dev_info;
115 	/* Get the number of event devices */
116 	nb_eventdev = rte_event_dev_count();
117 	if (nb_eventdev == 0) {
118 		EH_LOG_ERR("No event devices detected");
122 	if (nb_eventdev != 1) {
123 		EH_LOG_ERR("Event mode does not support multiple event devices. "
124 			   "Please provide only one event device.");
128 	/* Get the number of eth devs */
129 	nb_eth_dev = rte_eth_dev_count_avail();
130 	if (nb_eth_dev == 0) {
131 		EH_LOG_ERR("No eth devices detected");
135 	/* Get the number of lcores */
136 	lcore_count = rte_lcore_count();
138 	/* Read event device info */
139 	ret = rte_event_dev_info_get(0, &dev_info);
141 		EH_LOG_ERR("Failed to read event device info %d", ret);
145 	/* Check if enough ports are available */
146 	if (dev_info.max_event_ports < 2) {
147 		EH_LOG_ERR("Not enough event ports available");
151 	/* Get the first event dev conf */
152 	eventdev_config = &(em_conf->eventdev_config[0]);
154 	/* Save number of queues & ports available */
155 	eventdev_config->eventdev_id = 0;
156 	eventdev_config->nb_eventqueue = dev_info.max_event_queues;
157 	eventdev_config->nb_eventport = dev_info.max_event_ports;
158 	eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
160 	/* Check if there are more queues than required */
161 	if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
162 		/* One queue is reserved for Tx */
163 		eventdev_config->nb_eventqueue = nb_eth_dev + 1;
166 	/* Check if there are more ports than required */
167 	if (eventdev_config->nb_eventport > lcore_count) {
168 		/* One port per lcore is enough */
169 		eventdev_config->nb_eventport = lcore_count;
172 	/* Update the number of event devices */
173 	em_conf->nb_eventdev++;
/*
 * Build default event port <-> lcore links (1:1) when the application
 * supplied none. Also enables "all_ev_queue_to_ev_port" so every event
 * queue is later linked to every port; eventq_id is deliberately left
 * unset for that reason.
 */
179 eh_set_default_conf_link(struct eventmode_conf *em_conf)
181 	struct eventdev_params *eventdev_config;
182 	struct eh_event_link_info *link;
183 	unsigned int lcore_id = -1;
187 	 * Create a 1:1 mapping from event ports to cores. If the number
188 	 * of event ports is lesser than the cores, some cores won't
189 	 * execute worker. If there are more event ports, then some ports
195 	 * The event queue-port mapping is done according to the link. Since
196 	 * we are falling back to the default link config, enabling
197 	 * "all_ev_queue_to_ev_port" mode flag. This will map all queues
200 	em_conf->ext_params.all_ev_queue_to_ev_port = 1;
202 	/* Get first event dev conf */
203 	eventdev_config = &(em_conf->eventdev_config[0]);
205 	/* Loop through the ports */
206 	for (i = 0; i < eventdev_config->nb_eventport; i++) {
208 		/* Get next active core id */
209 		lcore_id = eh_get_next_active_core(em_conf,
212 		if (lcore_id == RTE_MAX_LCORE) {
213 			/* Reached max cores */
217 		/* Save the current combination as one link */
220 		link_index = em_conf->nb_link;
222 		/* Get the corresponding link */
223 		link = &(em_conf->link[link_index]);
226 		link->eventdev_id = eventdev_config->eventdev_id;
227 		link->event_port_id = i;
228 		link->lcore_id = lcore_id;
231 		 * Don't set eventq_id as by default all queues
232 		 * need to be mapped to the port, which is controlled
233 		 * by the operating mode.
236 		/* Update number of links */
/*
 * Build a default Rx adapter config: a single adapter on the first event
 * device, serviced by the next free eth core. Each enabled eth port
 * becomes one connection; ports map 1:1 to event queues unless there are
 * more eth ports than event queues, in which case all ports share event
 * queue 0.
 */
244 eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
246 	struct rx_adapter_connection_info *conn;
247 	struct eventdev_params *eventdev_config;
248 	struct rx_adapter_conf *adapter;
249 	bool single_ev_queue = false;
256 	/* Create one adapter with eth queues mapped to event queue(s) */
258 	if (em_conf->nb_eventdev == 0) {
259 		EH_LOG_ERR("No event devs registered");
263 	/* Get the number of eth devs */
264 	nb_eth_dev = rte_eth_dev_count_avail();
266 	/* Use the first event dev */
267 	eventdev_config = &(em_conf->eventdev_config[0]);
269 	/* Get eventdev ID */
270 	eventdev_id = eventdev_config->eventdev_id;
273 	/* Get adapter conf */
274 	adapter = &(em_conf->rx_adapter[adapter_id]);
276 	/* Set adapter conf */
277 	adapter->eventdev_id = eventdev_id;
278 	adapter->adapter_id = adapter_id;
279 	adapter->rx_core_id = eh_get_next_eth_core(em_conf);
282 	 * Map all queues of eth device (port) to an event queue. If there
283 	 * are more event queues than eth ports then create 1:1 mapping.
284 	 * Otherwise map all eth ports to a single event queue.
286 	if (nb_eth_dev > eventdev_config->nb_eventqueue)
287 		single_ev_queue = true;
289 	for (i = 0; i < nb_eth_dev; i++) {
291 		/* Use only the ports enabled */
292 		if ((em_conf->eth_portmask & (1 << i)) == 0)
295 		/* Get the connection id */
296 		conn_id = adapter->nb_connections;
298 		/* Get the connection */
299 		conn = &(adapter->conn[conn_id]);
301 		/* Set mapping between eth ports & event queues*/
303 		conn->eventq_id = single_ev_queue ? 0 : i;
305 		/* Add all eth queues eth port to event queue */
306 		conn->ethdev_rx_qid = -1;
308 		/* Update no of connections */
309 		adapter->nb_connections++;
313 	/* We have setup one adapter */
314 	em_conf->nb_rx_adapter = 1;
/*
 * Build a default Tx adapter config: a single adapter on the first event
 * device, with the last event queue reserved as the adapter's input
 * queue. Each enabled eth port becomes one connection covering all its
 * Tx queues (ethdev_tx_qid == -1).
 */
320 eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
322 	struct tx_adapter_connection_info *conn;
323 	struct eventdev_params *eventdev_config;
324 	struct tx_adapter_conf *tx_adapter;
332 	 * Create one Tx adapter with all eth queues mapped to event queues
336 	if (em_conf->nb_eventdev == 0) {
337 		EH_LOG_ERR("No event devs registered");
341 	/* Get the number of eth devs */
342 	nb_eth_dev = rte_eth_dev_count_avail();
344 	/* Use the first event dev */
345 	eventdev_config = &(em_conf->eventdev_config[0]);
347 	/* Get eventdev ID */
348 	eventdev_id = eventdev_config->eventdev_id;
351 	/* Get adapter conf */
352 	tx_adapter = &(em_conf->tx_adapter[adapter_id]);
354 	/* Set adapter conf */
355 	tx_adapter->eventdev_id = eventdev_id;
356 	tx_adapter->adapter_id = adapter_id;
358 	/* TODO: Tx core is required only when internal port is not present */
359 	tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);
362 	 * Application uses one event queue per adapter for submitting
363 	 * packets for Tx. Reserve the last queue available and decrement
364 	 * the total available event queues for this
367 	/* Queue numbers start at 0 */
368 	tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;
371 	 * Map all Tx queues of the eth device (port) to the event device.
374 	/* Set defaults for connections */
377 	 * One eth device (port) is one connection. Map all Tx queues
378 	 * of the device to the Tx adapter.
381 	for (i = 0; i < nb_eth_dev; i++) {
383 		/* Use only the ports enabled */
384 		if ((em_conf->eth_portmask & (1 << i)) == 0)
387 		/* Get the connection id */
388 		conn_id = tx_adapter->nb_connections;
390 		/* Get the connection */
391 		conn = &(tx_adapter->conn[conn_id]);
393 		/* Add ethdev to connections */
396 		/* Add all eth tx queues to adapter */
397 		conn->ethdev_tx_qid = -1;
399 		/* Update no of connections */
400 		tx_adapter->nb_connections++;
403 	/* We have setup one adapter */
404 	em_conf->nb_tx_adapter = 1;
/*
 * Validate the user-supplied eventmode config, filling in defaults for
 * every section the user left empty: eventdev, links, Rx adapters and
 * Tx adapters (in that order, since later defaults depend on earlier
 * ones).
 */
409 eh_validate_conf(struct eventmode_conf *em_conf)
414 	 * Check if event devs are specified. Else probe the event devices
415 	 * and initialize the config with all ports & queues available
417 	if (em_conf->nb_eventdev == 0) {
418 		ret = eh_set_default_conf_eventdev(em_conf);
424 	 * Check if links are specified. Else generate a default config for
425 	 * the event ports used.
427 	if (em_conf->nb_link == 0) {
428 		ret = eh_set_default_conf_link(em_conf);
434 	 * Check if rx adapters are specified. Else generate a default config
435 	 * with one rx adapter and all eth queues - event queue mapped.
437 	if (em_conf->nb_rx_adapter == 0) {
438 		ret = eh_set_default_conf_rx_adapter(em_conf);
444 	 * Check if tx adapters are specified. Else generate a default config
445 	 * with one tx adapter.
447 	if (em_conf->nb_tx_adapter == 0) {
448 		ret = eh_set_default_conf_tx_adapter(em_conf);
/*
 * Configure and start all event devices described in em_conf:
 *   1. configure each device with defaults derived from its capabilities,
 *   2. set up its event queues (last queue atomic, for the Tx stage;
 *      the rest use the configured sched_type),
 *   3. set up its event ports,
 *   4. link queues to ports per em_conf->link[] (all queues to each port
 *      when all_ev_queue_to_ev_port is set),
 *   5. start every device.
 */
457 eh_initialize_eventdev(struct eventmode_conf *em_conf)
459 	struct rte_event_queue_conf eventq_conf = {0};
460 	struct rte_event_dev_info evdev_default_conf;
461 	struct rte_event_dev_config eventdev_conf;
462 	struct eventdev_params *eventdev_config;
463 	int nb_eventdev = em_conf->nb_eventdev;
464 	struct eh_event_link_info *link;
465 	uint8_t *queue = NULL;
471 	for (i = 0; i < nb_eventdev; i++) {
473 		/* Get eventdev config */
474 		eventdev_config = &(em_conf->eventdev_config[i]);
476 		/* Get event dev ID */
477 		eventdev_id = eventdev_config->eventdev_id;
479 		/* Get the number of queues */
480 		nb_eventqueue = eventdev_config->nb_eventqueue;
482 		/* Reset the default conf */
483 		memset(&evdev_default_conf, 0,
484 			sizeof(struct rte_event_dev_info));
486 		/* Get default conf of eventdev */
487 		ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
490 				"Error in getting event device info[devID:%d]",
495 		memset(&eventdev_conf, 0, sizeof(struct rte_event_dev_config));
496 		eventdev_conf.nb_events_limit =
497 				evdev_default_conf.max_num_events;
498 		eventdev_conf.nb_event_queues = nb_eventqueue;
499 		eventdev_conf.nb_event_ports =
500 				eventdev_config->nb_eventport;
501 		eventdev_conf.nb_event_queue_flows =
502 				evdev_default_conf.max_event_queue_flows;
503 		eventdev_conf.nb_event_port_dequeue_depth =
504 				evdev_default_conf.max_event_port_dequeue_depth;
505 		eventdev_conf.nb_event_port_enqueue_depth =
506 				evdev_default_conf.max_event_port_enqueue_depth;
508 		/* Configure event device */
509 		ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
511 			EH_LOG_ERR("Error in configuring event device");
515 		/* Configure event queues */
516 		for (j = 0; j < nb_eventqueue; j++) {
518 			memset(&eventq_conf, 0,
519 					sizeof(struct rte_event_queue_conf));
521 			/* Per event dev queues can be ATQ or SINGLE LINK */
522 			eventq_conf.event_queue_cfg =
523 					eventdev_config->ev_queue_mode;
525 			 * All queues need to be set with sched_type as
526 			 * schedule type for the application stage. One queue
527 			 * would be reserved for the final eth tx stage. This
528 			 * will be an atomic queue.
530 			if (j == nb_eventqueue-1) {
531 				eventq_conf.schedule_type =
532 					RTE_SCHED_TYPE_ATOMIC;
534 				eventq_conf.schedule_type =
535 					em_conf->ext_params.sched_type;
538 			/* Set max atomic flows to 1024 */
539 			eventq_conf.nb_atomic_flows = 1024;
540 			eventq_conf.nb_atomic_order_sequences = 1024;
542 			/* Setup the queue */
543 			ret = rte_event_queue_setup(eventdev_id, j,
546 				EH_LOG_ERR("Failed to setup event queue %d",
552 		/* Configure event ports */
553 		for (j = 0; j <  eventdev_config->nb_eventport; j++) {
554 			ret = rte_event_port_setup(eventdev_id, j, NULL);
556 				EH_LOG_ERR("Failed to setup event port %d",
563 	/* Make event queue - event port link */
564 	for (j = 0; j <  em_conf->nb_link; j++) {
567 		link = &(em_conf->link[j]);
569 		/* Get event dev ID */
570 		eventdev_id = link->eventdev_id;
573 		 * If "all_ev_queue_to_ev_port" params flag is selected, all
574 		 * queues need to be mapped to the port.
576 		if (em_conf->ext_params.all_ev_queue_to_ev_port)
579 			queue = &(link->eventq_id);
581 		/* Link queue to port */
582 		ret = rte_event_port_link(eventdev_id, link->event_port_id,
585 			EH_LOG_ERR("Failed to link event port %d", ret);
590 	/* Start event devices */
591 	for (i = 0; i < nb_eventdev; i++) {
593 		/* Get eventdev config */
594 		eventdev_config = &(em_conf->eventdev_config[i]);
596 		ret = rte_event_dev_start(eventdev_config->eventdev_id);
598 			EH_LOG_ERR("Failed to start event device %d, %d",
/*
 * Create and start one event eth Rx adapter, adding an eth queue for
 * every connection in the adapter config. The adapter's service (if any)
 * is unmapped from the service core framework because this application
 * runs it manually from its own eth core (-ESRCH from the service_id_get
 * call means the adapter uses an internal port and needs no service).
 */
607 eh_rx_adapter_configure(struct eventmode_conf *em_conf,
608 		struct rx_adapter_conf *adapter)
610 	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
611 	struct rte_event_dev_info evdev_default_conf = {0};
612 	struct rte_event_port_conf port_conf = {0};
613 	struct rx_adapter_connection_info *conn;
619 	/* Get event dev ID */
620 	eventdev_id = adapter->eventdev_id;
622 	/* Get default configuration of event dev */
623 	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
625 		EH_LOG_ERR("Failed to get event dev info %d", ret);
629 	/* Setup port conf */
630 	port_conf.new_event_threshold = 1200;
631 	port_conf.dequeue_depth =
632 			evdev_default_conf.max_event_port_dequeue_depth;
633 	port_conf.enqueue_depth =
634 			evdev_default_conf.max_event_port_enqueue_depth;
636 	/* Create Rx adapter */
637 	ret = rte_event_eth_rx_adapter_create(adapter->adapter_id,
638 			adapter->eventdev_id, &port_conf);
640 		EH_LOG_ERR("Failed to create rx adapter %d", ret);
644 	/* Setup various connections in the adapter */
645 	for (j = 0; j < adapter->nb_connections; j++) {
647 		conn = &(adapter->conn[j]);
649 		/* Setup queue conf */
650 		queue_conf.ev.queue_id = conn->eventq_id;
651 		queue_conf.ev.sched_type = em_conf->ext_params.sched_type;
652 		queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;
654 		/* Add queue to the adapter */
655 		ret = rte_event_eth_rx_adapter_queue_add(adapter->adapter_id,
656 				conn->ethdev_id, conn->ethdev_rx_qid,
659 			EH_LOG_ERR("Failed to add eth queue to rx adapter %d",
665 	/* Get the service ID used by rx adapter */
666 	ret = rte_event_eth_rx_adapter_service_id_get(adapter->adapter_id,
668 	if (ret != -ESRCH && ret < 0) {
669 		EH_LOG_ERR("Failed to get service id used by rx adapter %d",
674 	rte_service_set_runstate_mapped_check(service_id, 0);
677 	ret = rte_event_eth_rx_adapter_start(adapter->adapter_id);
679 		EH_LOG_ERR("Failed to start rx adapter %d", ret);
/* Configure every Rx adapter described in em_conf, failing on the first error. */
687 eh_initialize_rx_adapter(struct eventmode_conf *em_conf)
689 	struct rx_adapter_conf *adapter;
692 	/* Configure rx adapters */
693 	for (i = 0; i < em_conf->nb_rx_adapter; i++) {
694 		adapter = &(em_conf->rx_adapter[i]);
695 		ret = eh_rx_adapter_configure(em_conf, adapter);
697 			EH_LOG_ERR("Failed to configure rx adapter %d", ret);
/*
 * Main loop for an eth (adapter service) core: collect the service IDs of
 * every Rx and Tx adapter assigned to this lcore, then spin running those
 * services until eth_core_running is cleared by eh_stop_worker_eth_core().
 *
 * NOTE(review): -ESRCH from the service_id_get calls is tolerated —
 * presumably it indicates the adapter uses an internal port and has no
 * service to run. The bounds checks compare with '>' before appending,
 * so the array capacity check relies on the elided loop tail — confirm
 * service_count cannot exceed EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE.
 */
705 eh_start_worker_eth_core(struct eventmode_conf *conf, uint32_t lcore_id)
707 	uint32_t service_id[EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE];
708 	struct rx_adapter_conf *rx_adapter;
709 	struct tx_adapter_conf *tx_adapter;
710 	int service_count = 0;
715 	EH_LOG_INFO("Entering eth_core processing on lcore %u", lcore_id);
718 	 * Parse adapter config to check which of all Rx adapters need
719 	 * to be handled by this core.
721 	for (i = 0; i < conf->nb_rx_adapter; i++) {
722 		/* Check if we have exceeded the max allowed */
723 		if (service_count > EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE) {
725 				"Exceeded the max allowed adapters per rx core");
729 		rx_adapter = &(conf->rx_adapter[i]);
730 		if (rx_adapter->rx_core_id != lcore_id)
733 		/* Adapter is handled by this core */
734 		adapter_id = rx_adapter->adapter_id;
736 		/* Get the service ID for the adapters */
737 		ret = rte_event_eth_rx_adapter_service_id_get(adapter_id,
738 				&(service_id[service_count]));
740 		if (ret != -ESRCH && ret < 0) {
742 				"Failed to get service id used by rx adapter");
746 		/* Update service count */
751 	 * Parse adapter config to see which of all Tx adapters need
752 	 * to be handled by this core.
754 	for (i = 0; i < conf->nb_tx_adapter; i++) {
755 		/* Check if we have exceeded the max allowed */
756 		if (service_count > EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE) {
758 				"Exceeded the max allowed adapters per tx core");
762 		tx_adapter = &conf->tx_adapter[i];
763 		if (tx_adapter->tx_core_id != lcore_id)
766 		/* Adapter is handled by this core */
767 		adapter_id = tx_adapter->adapter_id;
769 		/* Get the service ID for the adapters */
770 		ret = rte_event_eth_tx_adapter_service_id_get(adapter_id,
771 				&(service_id[service_count]));
773 		if (ret != -ESRCH && ret < 0) {
775 				"Failed to get service id used by tx adapter");
779 		/* Update service count */
783 	eth_core_running = true;
785 	while (eth_core_running) {
786 		for (i = 0; i < service_count; i++) {
787 			/* Initiate adapter service */
788 			rte_service_run_iter_on_app_lcore(service_id[i], 0);
/*
 * Signal all eth cores to exit their service loop by clearing the shared
 * eth_core_running flag (declared volatile at file scope).
 */
796 eh_stop_worker_eth_core(void)
798 	if (eth_core_running) {
799 		EH_LOG_INFO("Stopping eth cores");
800 		eth_core_running = false;
/*
 * Select the application worker whose declared capabilities match the
 * event device this lcore is linked to. Currently only the burst/non-burst
 * capability is derived from the device; the first registered worker with
 * an exactly matching capability word (cap.u64) wins.
 */
805 static struct eh_app_worker_params *
806 eh_find_worker(uint32_t lcore_id, struct eh_conf *conf,
807 		struct eh_app_worker_params *app_wrkrs, uint8_t nb_wrkr_param)
809 	struct eh_app_worker_params curr_conf = { {{0} }, NULL};
810 	struct eh_event_link_info *link = NULL;
811 	struct eh_app_worker_params *tmp_wrkr;
812 	struct eventmode_conf *em_conf;
816 	/* Get eventmode config */
817 	em_conf = conf->mode_params;
820 	 * Use event device from the first lcore-event link.
822 	 * Assumption: All lcore-event links tied to a core are using the
823 	 * same event device. In other words, one core would be polling on
824 	 * queues of a single event device only.
827 	/* Get a link for this lcore */
828 	for (i = 0; i < em_conf->nb_link; i++) {
829 		link = &(em_conf->link[i]);
830 		if (link->lcore_id == lcore_id)
835 		EH_LOG_ERR("No valid link found for lcore %d", lcore_id);
839 	/* Get event dev ID */
840 	eventdev_id = link->eventdev_id;
842 	/* Populate the curr_conf with the capabilities */
844 	/* Check for burst mode */
845 	if (eh_dev_has_burst_mode(eventdev_id))
846 		curr_conf.cap.burst = EH_RX_TYPE_BURST;
848 		curr_conf.cap.burst = EH_RX_TYPE_NON_BURST;
850 	/* Parse the passed list and see if we have matching capabilities */
852 	/* Initialize the pointer used to traverse the list */
853 	tmp_wrkr = app_wrkrs;
855 	for (i = 0; i < nb_wrkr_param; i++, tmp_wrkr++) {
857 		/* Skip this if capabilities are not matching */
858 		if (tmp_wrkr->cap.u64 != curr_conf.cap.u64)
861 		/* If the checks pass, we have a match */
/* Sanity-check a matched worker: it must have a worker_thread callback. */
869 eh_verify_match_worker(struct eh_app_worker_params *match_wrkr)
871 	/* Verify registered worker */
872 	if (match_wrkr->worker_thread == NULL) {
873 		EH_LOG_ERR("No worker registered");
/*
 * Copy all event links assigned to lcore_id into a freshly allocated
 * array and hand it to the caller via *links. Returns the number of
 * links cached. Ownership of the calloc'd buffer transfers to the
 * caller.
 *
 * NOTE(review): the calloc() result is used without a NULL check in the
 * visible lines — confirm the elided portion handles allocation failure.
 */
882 eh_get_event_lcore_links(uint32_t lcore_id, struct eh_conf *conf,
883 		struct eh_event_link_info **links)
885 	struct eh_event_link_info *link_cache;
886 	struct eventmode_conf *em_conf = NULL;
887 	struct eh_event_link_info *link;
888 	uint8_t lcore_nb_link = 0;
889 	size_t single_link_size;
894 	if (conf == NULL || links == NULL) {
895 		EH_LOG_ERR("Invalid args");
899 	/* Get eventmode conf */
900 	em_conf = conf->mode_params;
902 	if (em_conf == NULL) {
903 		EH_LOG_ERR("Invalid event mode parameters");
907 	/* Get the number of links registered */
908 	for (i = 0; i < em_conf->nb_link; i++) {
911 		link = &(em_conf->link[i]);
913 		/* Check if we have link intended for this lcore */
914 		if (link->lcore_id == lcore_id) {
916 			/* Update the number of links for this core */
922 	/* Compute size of one entry to be copied */
923 	single_link_size = sizeof(struct eh_event_link_info);
925 	/* Compute size of the buffer required */
926 	cache_size = lcore_nb_link * sizeof(struct eh_event_link_info);
928 	/* Compute size of the buffer required */
929 	link_cache = calloc(1, cache_size);
931 	/* Get the number of links registered */
932 	for (i = 0; i < em_conf->nb_link; i++) {
935 		link = &(em_conf->link[i]);
937 		/* Check if we have link intended for this lcore */
938 		if (link->lcore_id == lcore_id) {
941 			memcpy(&link_cache[index], link, single_link_size);
948 	/* Update the links for application to use the cached links */
951 	/* Return the number of cached links */
952 	return lcore_nb_link;
/*
 * Create and start one event eth Tx adapter: add each connection's eth
 * Tx queue(s), fetch the adapter's event port, unlink the reserved Tx
 * event queue from all application ports and link it exclusively to the
 * adapter's port. As with Rx, the adapter's service (if any) is unmapped
 * from the service core framework so the application can run it from its
 * own eth core (-ESRCH means no service is used).
 */
956 eh_tx_adapter_configure(struct eventmode_conf *em_conf,
957 		struct tx_adapter_conf *adapter)
959 	struct rte_event_dev_info evdev_default_conf = {0};
960 	struct rte_event_port_conf port_conf = {0};
961 	struct tx_adapter_connection_info *conn;
962 	struct eventdev_params *eventdev_config;
963 	uint8_t tx_port_id = 0;
968 	/* Get event dev ID */
969 	eventdev_id = adapter->eventdev_id;
971 	/* Get event device conf */
972 	eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);
974 	/* Create Tx adapter */
976 	/* Get default configuration of event dev */
977 	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
979 		EH_LOG_ERR("Failed to get event dev info %d", ret);
983 	/* Setup port conf */
984 	port_conf.new_event_threshold =
985 			evdev_default_conf.max_num_events;
986 	port_conf.dequeue_depth =
987 			evdev_default_conf.max_event_port_dequeue_depth;
988 	port_conf.enqueue_depth =
989 			evdev_default_conf.max_event_port_enqueue_depth;
992 	ret = rte_event_eth_tx_adapter_create(adapter->adapter_id,
993 			adapter->eventdev_id, &port_conf);
995 		EH_LOG_ERR("Failed to create tx adapter %d", ret);
999 	/* Setup various connections in the adapter */
1000 	for (j = 0; j < adapter->nb_connections; j++) {
1002 		/* Get connection */
1003 		conn = &(adapter->conn[j]);
1005 		/* Add queue to the adapter */
1006 		ret = rte_event_eth_tx_adapter_queue_add(adapter->adapter_id,
1007 				conn->ethdev_id, conn->ethdev_tx_qid);
1009 			EH_LOG_ERR("Failed to add eth queue to tx adapter %d",
1015 	/* Setup Tx queue & port */
1017 	/* Get event port used by the adapter */
1018 	ret = rte_event_eth_tx_adapter_event_port_get(
1019 			adapter->adapter_id, &tx_port_id);
1021 		EH_LOG_ERR("Failed to get tx adapter port id %d", ret);
1026 	 * Tx event queue is reserved for Tx adapter. Unlink this queue
1027 	 * from all other ports
1030 	for (j = 0; j < eventdev_config->nb_eventport; j++) {
1031 		rte_event_port_unlink(eventdev_id, j,
1032 				      &(adapter->tx_ev_queue), 1);
1035 	/* Link Tx event queue to Tx port */
1036 	ret = rte_event_port_link(eventdev_id, tx_port_id,
1037 			&(adapter->tx_ev_queue), NULL, 1);
1039 		EH_LOG_ERR("Failed to link event queue to port");
1043 	/* Get the service ID used by Tx adapter */
1044 	ret = rte_event_eth_tx_adapter_service_id_get(adapter->adapter_id,
1046 	if (ret != -ESRCH && ret < 0) {
1047 		EH_LOG_ERR("Failed to get service id used by tx adapter %d",
1052 	rte_service_set_runstate_mapped_check(service_id, 0);
1055 	ret = rte_event_eth_tx_adapter_start(adapter->adapter_id);
1057 		EH_LOG_ERR("Failed to start tx adapter %d", ret);
/* Configure every Tx adapter described in em_conf, failing on the first error. */
1065 eh_initialize_tx_adapter(struct eventmode_conf *em_conf)
1067 	struct tx_adapter_conf *adapter;
1070 	/* Configure Tx adapters */
1071 	for (i = 0; i < em_conf->nb_tx_adapter; i++) {
1072 		adapter = &(em_conf->tx_adapter[i]);
1073 		ret = eh_tx_adapter_configure(em_conf, adapter);
1075 			EH_LOG_ERR("Failed to configure tx adapter %d", ret);
/*
 * Log the configured scheduling type. The sched_types[] index assumes
 * RTE_SCHED_TYPE_ORDERED/ATOMIC/PARALLEL are 0/1/2 — any other
 * sched_type value would index out of bounds (trusted config input).
 */
1083 eh_display_operating_mode(struct eventmode_conf *em_conf)
1085 	char sched_types[][32] = {
1086 		"RTE_SCHED_TYPE_ORDERED",
1087 		"RTE_SCHED_TYPE_ATOMIC",
1088 		"RTE_SCHED_TYPE_PARALLEL",
1090 	EH_LOG_INFO("Operating mode:");
1092 	EH_LOG_INFO("\tScheduling type: \t%s",
1093 		sched_types[em_conf->ext_params.sched_type]);
/* Log per-eventdev configuration: dev ID, queue/port counts and queue mode. */
1099 eh_display_event_dev_conf(struct eventmode_conf *em_conf)
1101 	char queue_mode[][32] = {
1103 		"ATQ (ALL TYPE QUEUE)",
1106 	char print_buf[256] = { 0 };
1109 	EH_LOG_INFO("Event Device Configuration:");
1111 	for (i = 0; i < em_conf->nb_eventdev; i++) {
1113 			"\tDev ID: %-2d \tQueues: %-2d \tPorts: %-2d",
1114 			em_conf->eventdev_config[i].eventdev_id,
1115 			em_conf->eventdev_config[i].nb_eventqueue,
1116 			em_conf->eventdev_config[i].nb_eventport);
1117 		sprintf(print_buf + strlen(print_buf),
1119 			queue_mode[em_conf->eventdev_config[i].ev_queue_mode]);
1120 		EH_LOG_INFO("%s", print_buf);
/*
 * Log every Rx adapter and its eth-port -> event-queue connections.
 * An ethdev_rx_qid of -1 is printed as "ALL" (all Rx queues of the port).
 */
1126 eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)
1128 	int nb_rx_adapter = em_conf->nb_rx_adapter;
1129 	struct rx_adapter_connection_info *conn;
1130 	struct rx_adapter_conf *adapter;
1131 	char print_buf[256] = { 0 };
1134 	EH_LOG_INFO("Rx adapters configured: %d", nb_rx_adapter);
1136 	for (i = 0; i < nb_rx_adapter; i++) {
1137 		adapter = &(em_conf->rx_adapter[i]);
1139 			"\tRx adaper ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d"
1141 			adapter->adapter_id,
1142 			adapter->nb_connections,
1143 			adapter->eventdev_id,
1144 			adapter->rx_core_id);
1146 		for (j = 0; j < adapter->nb_connections; j++) {
1147 			conn = &(adapter->conn[j]);
1150 				"\t\tEthdev ID: %-2d", conn->ethdev_id);
1152 			if (conn->ethdev_rx_qid == -1)
1153 				sprintf(print_buf + strlen(print_buf),
1154 					"\tEth rx queue: %-2s", "ALL");
1156 				sprintf(print_buf + strlen(print_buf),
1157 					"\tEth rx queue: %-2d",
1158 					conn->ethdev_rx_qid);
1160 			sprintf(print_buf + strlen(print_buf),
1161 				"\tEvent queue: %-2d", conn->eventq_id);
1162 			EH_LOG_INFO("%s", print_buf);
/*
 * Log every Tx adapter and its connections. The Tx core field has two
 * sentinel values: (uint32_t)-1 means the adapter uses an internal port
 * (no service core needed), RTE_MAX_LCORE means no core was assigned.
 * An ethdev_tx_qid of -1 is printed as "ALL" (all Tx queues of the port).
 */
1169 eh_display_tx_adapter_conf(struct eventmode_conf *em_conf)
1171 	int nb_tx_adapter = em_conf->nb_tx_adapter;
1172 	struct tx_adapter_connection_info *conn;
1173 	struct tx_adapter_conf *adapter;
1174 	char print_buf[256] = { 0 };
1177 	EH_LOG_INFO("Tx adapters configured: %d", nb_tx_adapter);
1179 	for (i = 0; i < nb_tx_adapter; i++) {
1180 		adapter = &(em_conf->tx_adapter[i]);
1182 			"\tTx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
1183 			adapter->adapter_id,
1184 			adapter->nb_connections,
1185 			adapter->eventdev_id);
1186 		if (adapter->tx_core_id == (uint32_t)-1)
1187 			sprintf(print_buf + strlen(print_buf),
1188 				"\tTx core: %-2s", "[INTERNAL PORT]");
1189 		else if (adapter->tx_core_id == RTE_MAX_LCORE)
1190 			sprintf(print_buf + strlen(print_buf),
1191 				"\tTx core: %-2s", "[NONE]");
1193 			sprintf(print_buf + strlen(print_buf),
1194 				"\tTx core: %-2d,\tInput event queue: %-2d",
1195 				adapter->tx_core_id, adapter->tx_ev_queue);
1197 		EH_LOG_INFO("%s", print_buf);
1199 		for (j = 0; j < adapter->nb_connections; j++) {
1200 			conn = &(adapter->conn[j]);
1203 				"\t\tEthdev ID: %-2d", conn->ethdev_id);
1205 			if (conn->ethdev_tx_qid == -1)
1206 				sprintf(print_buf + strlen(print_buf),
1207 					"\tEth tx queue: %-2s", "ALL");
1209 				sprintf(print_buf + strlen(print_buf),
1210 					"\tEth tx queue: %-2d",
1211 					conn->ethdev_tx_qid);
1212 			EH_LOG_INFO("%s", print_buf);
/*
 * Log every event queue/port -> lcore link. When the
 * all_ev_queue_to_ev_port flag is set the per-link eventq_id is not
 * meaningful and "ALL" is printed instead.
 */
1219 eh_display_link_conf(struct eventmode_conf *em_conf)
1221 	struct eh_event_link_info *link;
1222 	char print_buf[256] = { 0 };
1225 	EH_LOG_INFO("Links configured: %d", em_conf->nb_link);
1227 	for (i = 0; i < em_conf->nb_link; i++) {
1228 		link = &(em_conf->link[i]);
1231 			"\tEvent dev ID: %-2d\tEvent port: %-2d",
1233 			link->event_port_id);
1235 		if (em_conf->ext_params.all_ev_queue_to_ev_port)
1236 			sprintf(print_buf + strlen(print_buf),
1237 				"Event queue: %-2s\t", "ALL");
1239 			sprintf(print_buf + strlen(print_buf),
1240 				"Event queue: %-2d\t", link->eventq_id);
1242 		sprintf(print_buf + strlen(print_buf),
1243 			"Lcore: %-2d", link->lcore_id);
1244 		EH_LOG_INFO("%s", print_buf);
/*
 * Validate the helper config pointers, then log every section of the
 * eventmode configuration (operating mode, eventdevs, Rx/Tx adapters,
 * links). Silently skips display when not in event transfer mode.
 */
1250 eh_display_conf(struct eh_conf *conf)
1252 	struct eventmode_conf *em_conf;
1255 		EH_LOG_ERR("Invalid event helper configuration");
1259 	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
1262 	if (conf->mode_params == NULL) {
1263 		EH_LOG_ERR("Invalid event mode parameters");
1267 	/* Get eventmode conf */
1268 	em_conf = (struct eventmode_conf *)(conf->mode_params);
1270 	/* Display user exposed operating modes */
1271 	eh_display_operating_mode(em_conf);
1273 	/* Display event device conf */
1274 	eh_display_event_dev_conf(em_conf);
1276 	/* Display Rx adapter conf */
1277 	eh_display_rx_adapter_conf(em_conf);
1279 	/* Display Tx adapter conf */
1280 	eh_display_tx_adapter_conf(em_conf);
1282 	/* Display event-lcore link */
1283 	eh_display_link_conf(em_conf);
/*
 * Top-level event-mode bring-up: validate/default the config, display it,
 * stop the enabled eth ports, initialize the event device(s) and Rx/Tx
 * adapters, then restart the eth ports. Eth devices must be stopped while
 * adapters are attached, hence the stop/start bracketing.
 */
1287 eh_devs_init(struct eh_conf *conf)
1289 	struct eventmode_conf *em_conf;
1294 		EH_LOG_ERR("Invalid event helper configuration");
1298 	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
1301 	if (conf->mode_params == NULL) {
1302 		EH_LOG_ERR("Invalid event mode parameters");
1306 	/* Get eventmode conf */
1307 	em_conf = conf->mode_params;
1309 	/* Eventmode conf would need eth portmask */
1310 	em_conf->eth_portmask = conf->eth_portmask;
1312 	/* Validate the requested config */
1313 	ret = eh_validate_conf(em_conf);
1315 		EH_LOG_ERR("Failed to validate the requested config %d", ret);
1319 	/* Display the current configuration */
1320 	eh_display_conf(conf);
1322 	/* Stop eth devices before setting up adapter */
1323 	RTE_ETH_FOREACH_DEV(port_id) {
1325 		/* Use only the ports enabled */
1326 		if ((conf->eth_portmask & (1 << port_id)) == 0)
1329 		rte_eth_dev_stop(port_id);
1332 	/* Setup eventdev */
1333 	ret = eh_initialize_eventdev(em_conf);
1335 		EH_LOG_ERR("Failed to initialize event dev %d", ret);
1339 	/* Setup Rx adapter */
1340 	ret = eh_initialize_rx_adapter(em_conf);
1342 		EH_LOG_ERR("Failed to initialize rx adapter %d", ret);
1346 	/* Setup Tx adapter */
1347 	ret = eh_initialize_tx_adapter(em_conf);
1349 		EH_LOG_ERR("Failed to initialize tx adapter %d", ret);
1353 	/* Start eth devices after setting up adapter */
1354 	RTE_ETH_FOREACH_DEV(port_id) {
1356 		/* Use only the ports enabled */
1357 		if ((conf->eth_portmask & (1 << port_id)) == 0)
1360 		ret = rte_eth_dev_start(port_id);
1362 			EH_LOG_ERR("Failed to start eth dev %d, %d",
/*
 * Tear down event mode: stop and free every Rx adapter (removing its
 * queues first), stop and close every event device, then stop and free
 * every Tx adapter.
 *
 * NOTE(review): event devices are closed before the Tx adapters that
 * reference them are stopped/freed — confirm this ordering is
 * intentional for the drivers in use.
 */
1372 eh_devs_uninit(struct eh_conf *conf)
1374 	struct eventmode_conf *em_conf;
1379 		EH_LOG_ERR("Invalid event helper configuration");
1383 	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
1386 	if (conf->mode_params == NULL) {
1387 		EH_LOG_ERR("Invalid event mode parameters");
1391 	/* Get eventmode conf */
1392 	em_conf = conf->mode_params;
1394 	/* Stop and release rx adapters */
1395 	for (i = 0; i < em_conf->nb_rx_adapter; i++) {
1397 		id = em_conf->rx_adapter[i].adapter_id;
1398 		ret = rte_event_eth_rx_adapter_stop(id);
1400 			EH_LOG_ERR("Failed to stop rx adapter %d", ret);
1404 		for (j = 0; j < em_conf->rx_adapter[i].nb_connections; j++) {
1406 			ret = rte_event_eth_rx_adapter_queue_del(id,
1407 				em_conf->rx_adapter[i].conn[j].ethdev_id, -1);
1410 					"Failed to remove rx adapter queues %d",
1416 		ret = rte_event_eth_rx_adapter_free(id);
1418 			EH_LOG_ERR("Failed to free rx adapter %d", ret);
1423 	/* Stop and release event devices */
1424 	for (i = 0; i < em_conf->nb_eventdev; i++) {
1426 		id = em_conf->eventdev_config[i].eventdev_id;
1427 		rte_event_dev_stop(id);
1429 		ret = rte_event_dev_close(id);
1431 			EH_LOG_ERR("Failed to close event dev %d, %d", id, ret);
1436 	/* Stop and release tx adapters */
1437 	for (i = 0; i < em_conf->nb_tx_adapter; i++) {
1439 		id = em_conf->tx_adapter[i].adapter_id;
1440 		ret = rte_event_eth_tx_adapter_stop(id);
1442 			EH_LOG_ERR("Failed to stop tx adapter %d", ret);
1446 		for (j = 0; j < em_conf->tx_adapter[i].nb_connections; j++) {
1448 			ret = rte_event_eth_tx_adapter_queue_del(id,
1449 				em_conf->tx_adapter[i].conn[j].ethdev_id, -1);
1452 					"Failed to remove tx adapter queues %d",
1458 		ret = rte_event_eth_tx_adapter_free(id);
1460 			EH_LOG_ERR("Failed to free tx adapter %d", ret);
/*
 * Per-lcore entry point. If this lcore is reserved as an eth core it runs
 * the adapter service loop (eh_start_worker_eth_core) and returns.
 * Otherwise it selects the application worker matching the event device's
 * capabilities, fetches this lcore's cached event links, and runs the
 * worker thread until it returns; on exit the eth cores are signalled to
 * stop.
 */
1469 eh_launch_worker(struct eh_conf *conf, struct eh_app_worker_params *app_wrkr,
1470 		uint8_t nb_wrkr_param)
1472 	struct eh_app_worker_params *match_wrkr;
1473 	struct eh_event_link_info *links = NULL;
1474 	struct eventmode_conf *em_conf;
1479 		EH_LOG_ERR("Invalid event helper configuration");
1483 	if (conf->mode_params == NULL) {
1484 		EH_LOG_ERR("Invalid event mode parameters");
1488 	/* Get eventmode conf */
1489 	em_conf = conf->mode_params;
1492 	lcore_id = rte_lcore_id();
1494 	/* Check if this is eth core */
1495 	if (rte_bitmap_get(em_conf->eth_core_mask, lcore_id)) {
1496 		eh_start_worker_eth_core(em_conf, lcore_id);
1500 	if (app_wrkr == NULL || nb_wrkr_param == 0) {
1501 		EH_LOG_ERR("Invalid args");
1506 	 * This is a regular worker thread. The application registers
1507 	 * multiple workers with various capabilities. Run worker
1508 	 * based on the selected capabilities of the event
1509 	 * device configured.
1512 	/* Get the first matching worker for the event device */
1513 	match_wrkr = eh_find_worker(lcore_id, conf, app_wrkr, nb_wrkr_param);
1514 	if (match_wrkr == NULL) {
1515 		EH_LOG_ERR("Failed to match worker registered for lcore %d",
1517 		goto clean_and_exit;
1520 	/* Verify sanity of the matched worker */
1521 	if (eh_verify_match_worker(match_wrkr) != 1) {
1522 		EH_LOG_ERR("Failed to validate the matched worker");
1523 		goto clean_and_exit;
1526 	/* Get worker links */
1527 	nb_links = eh_get_event_lcore_links(lcore_id, conf, &links);
1529 	/* Launch the worker thread */
1530 	match_wrkr->worker_thread(links, nb_links);
1532 	/* Free links info memory */
1537 	/* Flag eth_cores to stop, if started */
1538 	eh_stop_worker_eth_core();
/*
 * Return the event queue workers must enqueue to for Tx on the given
 * event device: the last event queue, which eh_initialize_eventdev()
 * sets up as the atomic queue reserved for the eth Tx stage.
 */
1542 eh_get_tx_queue(struct eh_conf *conf, uint8_t eventdev_id)
1544 	struct eventdev_params *eventdev_config;
1545 	struct eventmode_conf *em_conf;
1548 		EH_LOG_ERR("Invalid event helper configuration");
1552 	if (conf->mode_params == NULL) {
1553 		EH_LOG_ERR("Invalid event mode parameters");
1557 	/* Get eventmode conf */
1558 	em_conf = conf->mode_params;
1560 	/* Get event device conf */
1561 	eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);
1563 	if (eventdev_config == NULL) {
1564 		EH_LOG_ERR("Failed to read eventdev config");
1569 	 * The last queue is reserved to be used as atomic queue for the
1570 	 * last stage (eth packet tx stage)
1572 	return eventdev_config->nb_eventqueue - 1;