examples/ipsec-secgw: add event helper config init/uninit
[dpdk.git] / examples / ipsec-secgw / event_helper.c
index fca1e08..0854fc2 100644 (file)
@@ -11,6 +11,8 @@
 
 #include "event_helper.h"
 
+static volatile bool eth_core_running;
+
 static int
 eh_get_enabled_cores(struct rte_bitmap *eth_core_mask)
 {
@@ -93,6 +95,49 @@ eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id)
 
        return &(em_conf->eventdev_config[i]);
 }
+
+/*
+ * Check whether the Rx adapter has an internal event port on EVERY
+ * ethdev for the given event device. Returns true only if all ports
+ * report RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT; with no ethdev
+ * present the loop body never runs and true is returned.
+ */
+static inline bool
+eh_dev_has_rx_internal_port(uint8_t eventdev_id)
+{
+       bool flag = true;
+       int j;
+
+       RTE_ETH_FOREACH_DEV(j) {
+               uint32_t caps = 0;
+
+               /* On failure caps stays 0, so the port conservatively
+                * counts as having no internal port. */
+               rte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps);
+               if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
+                       flag = false;
+       }
+       return flag;
+}
+
+/*
+ * Check whether the Tx adapter has an internal event port on EVERY
+ * ethdev for the given event device. Returns true only if all ports
+ * report RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT; with no ethdev
+ * present the loop body never runs and true is returned.
+ */
+static inline bool
+eh_dev_has_tx_internal_port(uint8_t eventdev_id)
+{
+       bool flag = true;
+       int j;
+
+       RTE_ETH_FOREACH_DEV(j) {
+               uint32_t caps = 0;
+
+               /* On failure caps stays 0, so the port conservatively
+                * counts as having no internal port. */
+               rte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps);
+               if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
+                       flag = false;
+       }
+       return flag;
+}
+
+/* Report whether the event device advertises burst-mode dequeue. */
+static inline bool
+eh_dev_has_burst_mode(uint8_t dev_id)
+{
+       struct rte_event_dev_info dev_info;
+
+       /* Query device capabilities and test the burst-mode bit */
+       rte_event_dev_info_get(dev_id, &dev_info);
+       return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) != 0;
+}
+
 static int
 eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 {
@@ -163,6 +208,42 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
        return 0;
 }
 
+/*
+ * Inspect each configured event device for internal-port support on
+ * both the Rx and Tx paths and record the result per device. If every
+ * device has internal ports in both directions, software adapters are
+ * unnecessary: the reserved eth cores are released and the per-device
+ * Tx event queue is dropped.
+ */
+static void
+eh_do_capability_check(struct eventmode_conf *em_conf)
+{
+       struct eventdev_params *eventdev_config;
+       int all_internal_ports = 1;
+       uint32_t eventdev_id;
+       int i;
+
+       for (i = 0; i < em_conf->nb_eventdev; i++) {
+
+               /* Get the event dev conf */
+               eventdev_config = &(em_conf->eventdev_config[i]);
+               eventdev_id = eventdev_config->eventdev_id;
+
+               /* Check if event device has internal port for Rx & Tx */
+               if (eh_dev_has_rx_internal_port(eventdev_id) &&
+                   eh_dev_has_tx_internal_port(eventdev_id)) {
+                       eventdev_config->all_internal_ports = 1;
+               } else {
+                       all_internal_ports = 0;
+               }
+       }
+
+       /*
+        * If Rx & Tx internal ports are supported by all event devices then
+        * eth cores won't be required. Override the eth core mask requested
+        * and decrement number of event queues by one as it won't be needed
+        * for Tx.
+        */
+       if (all_internal_ports) {
+               rte_bitmap_reset(em_conf->eth_core_mask);
+               for (i = 0; i < em_conf->nb_eventdev; i++)
+                       em_conf->eventdev_config[i].nb_eventqueue--;
+       }
+}
+
 static int
 eh_set_default_conf_link(struct eventmode_conf *em_conf)
 {
@@ -234,7 +315,10 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
        struct rx_adapter_connection_info *conn;
        struct eventdev_params *eventdev_config;
        struct rx_adapter_conf *adapter;
+       bool rx_internal_port = true;
        bool single_ev_queue = false;
+       int nb_eventqueue;
+       uint32_t caps = 0;
        int eventdev_id;
        int nb_eth_dev;
        int adapter_id;
@@ -264,14 +348,21 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
        /* Set adapter conf */
        adapter->eventdev_id = eventdev_id;
        adapter->adapter_id = adapter_id;
-       adapter->rx_core_id = eh_get_next_eth_core(em_conf);
+
+       /*
+        * If event device does not have internal ports for passing
+        * packets then reserved one queue for Tx path
+        */
+       nb_eventqueue = eventdev_config->all_internal_ports ?
+                       eventdev_config->nb_eventqueue :
+                       eventdev_config->nb_eventqueue - 1;
 
        /*
         * Map all queues of eth device (port) to an event queue. If there
         * are more event queues than eth ports then create 1:1 mapping.
         * Otherwise map all eth ports to a single event queue.
         */
-       if (nb_eth_dev > eventdev_config->nb_eventqueue)
+       if (nb_eth_dev > nb_eventqueue)
                single_ev_queue = true;
 
        for (i = 0; i < nb_eth_dev; i++) {
@@ -293,11 +384,24 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
                /* Add all eth queues eth port to event queue */
                conn->ethdev_rx_qid = -1;
 
+               /* Get Rx adapter capabilities */
+               rte_event_eth_rx_adapter_caps_get(eventdev_id, i, &caps);
+               if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
+                       rx_internal_port = false;
+
                /* Update no of connections */
                adapter->nb_connections++;
 
        }
 
+       if (rx_internal_port) {
+               /* Rx core is not required */
+               adapter->rx_core_id = -1;
+       } else {
+               /* Rx core is required */
+               adapter->rx_core_id = eh_get_next_eth_core(em_conf);
+       }
+
        /* We have setup one adapter */
        em_conf->nb_rx_adapter = 1;
 
@@ -310,6 +414,8 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
        struct tx_adapter_connection_info *conn;
        struct eventdev_params *eventdev_config;
        struct tx_adapter_conf *tx_adapter;
+       bool tx_internal_port = true;
+       uint32_t caps = 0;
        int eventdev_id;
        int adapter_id;
        int nb_eth_dev;
@@ -343,18 +449,6 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
        tx_adapter->eventdev_id = eventdev_id;
        tx_adapter->adapter_id = adapter_id;
 
-       /* TODO: Tx core is required only when internal port is not present */
-       tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);
-
-       /*
-        * Application uses one event queue per adapter for submitting
-        * packets for Tx. Reserve the last queue available and decrement
-        * the total available event queues for this
-        */
-
-       /* Queue numbers start at 0 */
-       tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;
-
        /*
         * Map all Tx queues of the eth device (port) to the event device.
         */
@@ -384,10 +478,30 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
                /* Add all eth tx queues to adapter */
                conn->ethdev_tx_qid = -1;
 
+               /* Get Tx adapter capabilities */
+               rte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);
+               if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
+                       tx_internal_port = false;
+
                /* Update no of connections */
                tx_adapter->nb_connections++;
        }
 
+       if (tx_internal_port) {
+               /* Tx core is not required */
+               tx_adapter->tx_core_id = -1;
+       } else {
+               /* Tx core is required */
+               tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);
+
+               /*
+                * Use one event queue per adapter for submitting packets
+                * for Tx. Reserving the last queue available
+                */
+               /* Queue numbers start at 0 */
+               tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;
+       }
+
        /* We have setup one adapter */
        em_conf->nb_tx_adapter = 1;
        return 0;
@@ -408,6 +522,9 @@ eh_validate_conf(struct eventmode_conf *em_conf)
                        return ret;
        }
 
+       /* Perform capability check for the selected event devices */
+       eh_do_capability_check(em_conf);
+
        /*
         * Check if links are specified. Else generate a default config for
         * the event ports used.
@@ -511,11 +628,13 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
                                        eventdev_config->ev_queue_mode;
                        /*
                         * All queues need to be set with sched_type as
-                        * schedule type for the application stage. One queue
-                        * would be reserved for the final eth tx stage. This
-                        * will be an atomic queue.
+                        * schedule type for the application stage. One
+                        * queue would be reserved for the final eth tx
+                        * stage if event device does not have internal
+                        * ports. This will be an atomic queue.
                         */
-                       if (j == nb_eventqueue-1) {
+                       if (!eventdev_config->all_internal_ports &&
+                           j == nb_eventqueue-1) {
                                eventq_conf.schedule_type =
                                        RTE_SCHED_TYPE_ATOMIC;
                        } else {
@@ -689,6 +808,263 @@ eh_initialize_rx_adapter(struct eventmode_conf *em_conf)
        return 0;
 }
 
+/*
+ * Service loop run on a dedicated eth core. Collects the service IDs
+ * of every Rx/Tx adapter whose rx_core_id/tx_core_id matches this
+ * lcore and runs those services until eth_core_running is cleared by
+ * eh_stop_worker_eth_core(). Returns 0, or a negative error from the
+ * service-id lookups.
+ */
+static int32_t
+eh_start_worker_eth_core(struct eventmode_conf *conf, uint32_t lcore_id)
+{
+       uint32_t service_id[EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE];
+       struct rx_adapter_conf *rx_adapter;
+       struct tx_adapter_conf *tx_adapter;
+       int service_count = 0;
+       int adapter_id;
+       int32_t ret;
+       int i;
+
+       EH_LOG_INFO("Entering eth_core processing on lcore %u", lcore_id);
+
+       /*
+        * Parse adapter config to check which of all Rx adapters need
+        * to be handled by this core.
+        */
+       for (i = 0; i < conf->nb_rx_adapter; i++) {
+               /*
+                * Stop before the array is full: a '>' check here would
+                * permit one write past the end of service_id[].
+                */
+               if (service_count >= EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE) {
+                       EH_LOG_ERR(
+                             "Exceeded the max allowed adapters per rx core");
+                       break;
+               }
+
+               rx_adapter = &(conf->rx_adapter[i]);
+               if (rx_adapter->rx_core_id != lcore_id)
+                       continue;
+
+               /* Adapter is handled by this core */
+               adapter_id = rx_adapter->adapter_id;
+
+               /* Get the service ID for the adapters */
+               ret = rte_event_eth_rx_adapter_service_id_get(adapter_id,
+                               &(service_id[service_count]));
+
+               /* -ESRCH (no service context) is not fatal; skip it */
+               if (ret != -ESRCH && ret < 0) {
+                       EH_LOG_ERR(
+                               "Failed to get service id used by rx adapter");
+                       return ret;
+               }
+
+               /* Update service count */
+               service_count++;
+       }
+
+       /*
+        * Parse adapter config to see which of all Tx adapters need
+        * to be handled by this core.
+        */
+       for (i = 0; i < conf->nb_tx_adapter; i++) {
+               /*
+                * NOTE(review): service_id[] is shared with the Rx loop
+                * above and sized by the RX constant, so bound by both
+                * limits here. Assumes the TX limit is meant to cap the
+                * combined count -- confirm against the header.
+                */
+               if (service_count >= EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE ||
+                   service_count >= EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE) {
+                       EH_LOG_ERR(
+                               "Exceeded the max allowed adapters per tx core");
+                       break;
+               }
+
+               tx_adapter = &conf->tx_adapter[i];
+               if (tx_adapter->tx_core_id != lcore_id)
+                       continue;
+
+               /* Adapter is handled by this core */
+               adapter_id = tx_adapter->adapter_id;
+
+               /* Get the service ID for the adapters */
+               ret = rte_event_eth_tx_adapter_service_id_get(adapter_id,
+                               &(service_id[service_count]));
+
+               /* -ESRCH (no service context) is not fatal; skip it */
+               if (ret != -ESRCH && ret < 0) {
+                       EH_LOG_ERR(
+                               "Failed to get service id used by tx adapter");
+                       return ret;
+               }
+
+               /* Update service count */
+               service_count++;
+       }
+
+       eth_core_running = true;
+
+       /* Poll the collected adapter services until asked to stop */
+       while (eth_core_running) {
+               for (i = 0; i < service_count; i++) {
+                       /* Initiate adapter service */
+                       rte_service_run_iter_on_app_lcore(service_id[i], 0);
+               }
+       }
+
+       return 0;
+}
+
+/* Signal all eth cores to leave their service loop (idempotent). */
+static int32_t
+eh_stop_worker_eth_core(void)
+{
+       if (!eth_core_running)
+               return 0;
+
+       EH_LOG_INFO("Stopping eth cores");
+       eth_core_running = false;
+       return 0;
+}
+
+/*
+ * Select the registered application worker whose declared capabilities
+ * (Tx internal port, burst mode) match those of the event device linked
+ * to this lcore. Returns NULL when the lcore has no event link or no
+ * worker matches.
+ */
+static struct eh_app_worker_params *
+eh_find_worker(uint32_t lcore_id, struct eh_conf *conf,
+               struct eh_app_worker_params *app_wrkrs, uint8_t nb_wrkr_param)
+{
+       struct eh_app_worker_params curr_conf = { {{0} }, NULL};
+       struct eh_event_link_info *link = NULL;
+       struct eh_app_worker_params *tmp_wrkr;
+       struct eventmode_conf *em_conf;
+       uint8_t eventdev_id;
+       int i;
+
+       /* Get eventmode config */
+       em_conf = conf->mode_params;
+
+       /*
+        * Use event device from the first lcore-event link.
+        *
+        * Assumption: All lcore-event links tied to a core are using the
+        * same event device. In other words, one core would be polling on
+        * queues of a single event device only.
+        */
+
+       /*
+        * Get a link for this lcore. Assign 'link' only on a real match:
+        * unconditionally assigning it inside the loop leaves it pointing
+        * at the last entry when no link belongs to this lcore, which
+        * defeats the NULL check below.
+        */
+       for (i = 0; i < em_conf->nb_link; i++) {
+               if (em_conf->link[i].lcore_id == lcore_id) {
+                       link = &(em_conf->link[i]);
+                       break;
+               }
+       }
+
+       if (link == NULL) {
+               EH_LOG_ERR("No valid link found for lcore %d", lcore_id);
+               return NULL;
+       }
+
+       /* Get event dev ID */
+       eventdev_id = link->eventdev_id;
+
+       /* Populate the curr_conf with the capabilities */
+
+       /* Check for Tx internal port */
+       if (eh_dev_has_tx_internal_port(eventdev_id))
+               curr_conf.cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
+       else
+               curr_conf.cap.tx_internal_port = EH_TX_TYPE_NO_INTERNAL_PORT;
+
+       /* Check for burst mode */
+       if (eh_dev_has_burst_mode(eventdev_id))
+               curr_conf.cap.burst = EH_RX_TYPE_BURST;
+       else
+               curr_conf.cap.burst = EH_RX_TYPE_NON_BURST;
+
+       /* Parse the passed list and see if we have matching capabilities */
+
+       /* Initialize the pointer used to traverse the list */
+       tmp_wrkr = app_wrkrs;
+
+       for (i = 0; i < nb_wrkr_param; i++, tmp_wrkr++) {
+
+               /* Skip this if capabilities are not matching */
+               if (tmp_wrkr->cap.u64 != curr_conf.cap.u64)
+                       continue;
+
+               /* If the checks pass, we have a match */
+               return tmp_wrkr;
+       }
+
+       return NULL;
+}
+
+/*
+ * Sanity-check a matched worker entry. Returns 1 when a worker thread
+ * is registered, 0 (with an error log) otherwise.
+ */
+static int
+eh_verify_match_worker(struct eh_app_worker_params *match_wrkr)
+{
+       if (match_wrkr->worker_thread != NULL)
+               return 1;
+
+       EH_LOG_ERR("No worker registered");
+       return 0;
+}
+
+/*
+ * Build a heap-allocated copy of all event links configured for the
+ * given lcore and hand it back through 'links' (caller frees).
+ * Returns the number of cached links, or 0 on any failure. The return
+ * type is uint8_t, so returning a negative errno (as done previously)
+ * would be seen by callers as a large positive link count.
+ */
+static uint8_t
+eh_get_event_lcore_links(uint32_t lcore_id, struct eh_conf *conf,
+               struct eh_event_link_info **links)
+{
+       struct eh_event_link_info *link_cache;
+       struct eventmode_conf *em_conf = NULL;
+       struct eh_event_link_info *link;
+       uint8_t lcore_nb_link = 0;
+       size_t single_link_size;
+       int index = 0;
+       int i;
+
+       if (conf == NULL || links == NULL) {
+               EH_LOG_ERR("Invalid args");
+               return 0;
+       }
+
+       /* Get eventmode conf */
+       em_conf = conf->mode_params;
+
+       if (em_conf == NULL) {
+               EH_LOG_ERR("Invalid event mode parameters");
+               *links = NULL;
+               return 0;
+       }
+
+       /* Count the links registered for this lcore */
+       for (i = 0; i < em_conf->nb_link; i++) {
+               if (em_conf->link[i].lcore_id == lcore_id)
+                       lcore_nb_link++;
+       }
+
+       /* Nothing to cache for this lcore */
+       if (lcore_nb_link == 0) {
+               *links = NULL;
+               return 0;
+       }
+
+       /* Compute size of one entry to be copied */
+       single_link_size = sizeof(struct eh_event_link_info);
+
+       /* Allocate the cache; an unchecked calloc here would make the
+        * memcpy below dereference NULL on allocation failure. */
+       link_cache = calloc(lcore_nb_link, single_link_size);
+       if (link_cache == NULL) {
+               EH_LOG_ERR("Failed to allocate memory for event links");
+               *links = NULL;
+               return 0;
+       }
+
+       /* Copy the matching links into the cache */
+       for (i = 0; i < em_conf->nb_link; i++) {
+
+               /* Get link */
+               link = &(em_conf->link[i]);
+
+               /* Check if we have link intended for this lcore */
+               if (link->lcore_id == lcore_id) {
+
+                       /* Cache the link */
+                       memcpy(&link_cache[index], link, single_link_size);
+
+                       /* Update index */
+                       index++;
+               }
+       }
+
+       /* Update the links for application to use the cached links */
+       *links = link_cache;
+
+       /* Return the number of cached links */
+       return lcore_nb_link;
+}
+
 static int
 eh_tx_adapter_configure(struct eventmode_conf *em_conf,
                struct tx_adapter_conf *adapter)
@@ -749,6 +1125,16 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,
                }
        }
 
+       /*
+        * Check if Tx core is assigned. If Tx core is not assigned then
+        * the adapter has internal port for submitting Tx packets and
+        * Tx event queue & port setup is not required
+        */
+       if (adapter->tx_core_id == (uint32_t) (-1)) {
+               /* Internal port is present */
+               goto skip_tx_queue_port_setup;
+       }
+
        /* Setup Tx queue & port */
 
        /* Get event port used by the adapter */
@@ -788,6 +1174,7 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,
 
        rte_service_set_runstate_mapped_check(service_id, 0);
 
+skip_tx_queue_port_setup:
        /* Start adapter */
        ret = rte_event_eth_tx_adapter_start(adapter->adapter_id);
        if (ret < 0) {
@@ -816,6 +1203,322 @@ eh_initialize_tx_adapter(struct eventmode_conf *em_conf)
        return 0;
 }
 
+/* Log the user-selected operating mode (scheduling type). */
+static void
+eh_display_operating_mode(struct eventmode_conf *em_conf)
+{
+       /*
+        * Maps RTE_SCHED_TYPE_* (0..2) to printable names.
+        * NOTE(review): the indexing below assumes sched_type holds a
+        * valid scheduling type; SCHED_TYPE_NOT_SET would read out of
+        * bounds -- confirm callers validate the conf first.
+        */
+       char sched_types[][32] = {
+               "RTE_SCHED_TYPE_ORDERED",
+               "RTE_SCHED_TYPE_ATOMIC",
+               "RTE_SCHED_TYPE_PARALLEL",
+       };
+       EH_LOG_INFO("Operating mode:");
+
+       EH_LOG_INFO("\tScheduling type: \t%s",
+               sched_types[em_conf->ext_params.sched_type]);
+
+       EH_LOG_INFO("");
+}
+
+/*
+ * Log the per-event-device configuration: queues, ports and queue mode.
+ * NOTE(review): queue_mode[] is indexed by ev_queue_mode, assumed to be
+ * in 0..2 -- confirm against the RTE_EVENT_QUEUE_CFG_* values used.
+ */
+static void
+eh_display_event_dev_conf(struct eventmode_conf *em_conf)
+{
+       char queue_mode[][32] = {
+               "",
+               "ATQ (ALL TYPE QUEUE)",
+               "SINGLE LINK",
+       };
+       char print_buf[256] = { 0 };
+       int i;
+
+       EH_LOG_INFO("Event Device Configuration:");
+
+       for (i = 0; i < em_conf->nb_eventdev; i++) {
+               sprintf(print_buf,
+                       "\tDev ID: %-2d \tQueues: %-2d \tPorts: %-2d",
+                       em_conf->eventdev_config[i].eventdev_id,
+                       em_conf->eventdev_config[i].nb_eventqueue,
+                       em_conf->eventdev_config[i].nb_eventport);
+               sprintf(print_buf + strlen(print_buf),
+                       "\tQueue mode: %s",
+                       queue_mode[em_conf->eventdev_config[i].ev_queue_mode]);
+               EH_LOG_INFO("%s", print_buf);
+       }
+       EH_LOG_INFO("");
+}
+
+/*
+ * Log every configured Rx adapter: id, connection count, event device,
+ * servicing core (or [INTERNAL PORT]/[NONE]) and each eth port / rx
+ * queue to event queue connection.
+ * NOTE(review): messages are built with unbounded sprintf into a
+ * 256-byte buffer; sizes look safe for the current fields -- confirm.
+ */
+static void
+eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)
+{
+       int nb_rx_adapter = em_conf->nb_rx_adapter;
+       struct rx_adapter_connection_info *conn;
+       struct rx_adapter_conf *adapter;
+       char print_buf[256] = { 0 };
+       int i, j;
+
+       EH_LOG_INFO("Rx adapters configured: %d", nb_rx_adapter);
+
+       for (i = 0; i < nb_rx_adapter; i++) {
+               adapter = &(em_conf->rx_adapter[i]);
+               /* Fixed typo in the log text: "adaper" -> "adapter" */
+               sprintf(print_buf,
+                       "\tRx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
+                       adapter->adapter_id,
+                       adapter->nb_connections,
+                       adapter->eventdev_id);
+               if (adapter->rx_core_id == (uint32_t)-1)
+                       sprintf(print_buf + strlen(print_buf),
+                               "\tRx core: %-2s", "[INTERNAL PORT]");
+               else if (adapter->rx_core_id == RTE_MAX_LCORE)
+                       sprintf(print_buf + strlen(print_buf),
+                               "\tRx core: %-2s", "[NONE]");
+               else
+                       sprintf(print_buf + strlen(print_buf),
+                               "\tRx core: %-2d", adapter->rx_core_id);
+
+               EH_LOG_INFO("%s", print_buf);
+
+               for (j = 0; j < adapter->nb_connections; j++) {
+                       conn = &(adapter->conn[j]);
+
+                       sprintf(print_buf,
+                               "\t\tEthdev ID: %-2d", conn->ethdev_id);
+
+                       /* -1 means the adapter serves all rx queues */
+                       if (conn->ethdev_rx_qid == -1)
+                               sprintf(print_buf + strlen(print_buf),
+                                       "\tEth rx queue: %-2s", "ALL");
+                       else
+                               sprintf(print_buf + strlen(print_buf),
+                                       "\tEth rx queue: %-2d",
+                                       conn->ethdev_rx_qid);
+
+                       sprintf(print_buf + strlen(print_buf),
+                               "\tEvent queue: %-2d", conn->eventq_id);
+                       EH_LOG_INFO("%s", print_buf);
+               }
+       }
+       EH_LOG_INFO("");
+}
+
+/*
+ * Log every configured Tx adapter: id, connection count, event device,
+ * servicing core (or [INTERNAL PORT]/[NONE]) plus its input event
+ * queue, and each eth port / tx queue connection.
+ * NOTE(review): messages are built with unbounded sprintf into a
+ * 256-byte buffer; sizes look safe for the current fields -- confirm.
+ */
+static void
+eh_display_tx_adapter_conf(struct eventmode_conf *em_conf)
+{
+       int nb_tx_adapter = em_conf->nb_tx_adapter;
+       struct tx_adapter_connection_info *conn;
+       struct tx_adapter_conf *adapter;
+       char print_buf[256] = { 0 };
+       int i, j;
+
+       EH_LOG_INFO("Tx adapters configured: %d", nb_tx_adapter);
+
+       for (i = 0; i < nb_tx_adapter; i++) {
+               adapter = &(em_conf->tx_adapter[i]);
+               sprintf(print_buf,
+                       "\tTx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
+                       adapter->adapter_id,
+                       adapter->nb_connections,
+                       adapter->eventdev_id);
+               /* tx_core_id of -1 marks an internal port, RTE_MAX_LCORE
+                * marks no core assigned */
+               if (adapter->tx_core_id == (uint32_t)-1)
+                       sprintf(print_buf + strlen(print_buf),
+                               "\tTx core: %-2s", "[INTERNAL PORT]");
+               else if (adapter->tx_core_id == RTE_MAX_LCORE)
+                       sprintf(print_buf + strlen(print_buf),
+                               "\tTx core: %-2s", "[NONE]");
+               else
+                       sprintf(print_buf + strlen(print_buf),
+                               "\tTx core: %-2d,\tInput event queue: %-2d",
+                               adapter->tx_core_id, adapter->tx_ev_queue);
+
+               EH_LOG_INFO("%s", print_buf);
+
+               for (j = 0; j < adapter->nb_connections; j++) {
+                       conn = &(adapter->conn[j]);
+
+                       sprintf(print_buf,
+                               "\t\tEthdev ID: %-2d", conn->ethdev_id);
+
+                       /* -1 means the adapter serves all tx queues */
+                       if (conn->ethdev_tx_qid == -1)
+                               sprintf(print_buf + strlen(print_buf),
+                                       "\tEth tx queue: %-2s", "ALL");
+                       else
+                               sprintf(print_buf + strlen(print_buf),
+                                       "\tEth tx queue: %-2d",
+                                       conn->ethdev_tx_qid);
+                       EH_LOG_INFO("%s", print_buf);
+               }
+       }
+       EH_LOG_INFO("");
+}
+
+/*
+ * Log every event-queue-to-lcore link: event device, event port, event
+ * queue (or ALL when one port is linked to every queue) and the lcore.
+ */
+static void
+eh_display_link_conf(struct eventmode_conf *em_conf)
+{
+       struct eh_event_link_info *link;
+       char print_buf[256] = { 0 };
+       int i;
+
+       EH_LOG_INFO("Links configured: %d", em_conf->nb_link);
+
+       for (i = 0; i < em_conf->nb_link; i++) {
+               link = &(em_conf->link[i]);
+
+               sprintf(print_buf,
+                       "\tEvent dev ID: %-2d\tEvent port: %-2d",
+                       link->eventdev_id,
+                       link->event_port_id);
+
+               /* When all queues go to each port, per-link queue ids
+                * are meaningless -- print ALL instead */
+               if (em_conf->ext_params.all_ev_queue_to_ev_port)
+                       sprintf(print_buf + strlen(print_buf),
+                               "Event queue: %-2s\t", "ALL");
+               else
+                       sprintf(print_buf + strlen(print_buf),
+                               "Event queue: %-2d\t", link->eventq_id);
+
+               sprintf(print_buf + strlen(print_buf),
+                       "Lcore: %-2d", link->lcore_id);
+               EH_LOG_INFO("%s", print_buf);
+       }
+       EH_LOG_INFO("");
+}
+
+/*
+ * Allocate and populate a default event helper configuration:
+ * poll-mode packet transfer, all eth ports enabled, scheduling type
+ * unset, and the two lcores after the master reserved as eth (Rx/Tx)
+ * cores in the eth core bitmap. Returns NULL on any allocation or
+ * bitmap-init failure, releasing partial state via the cleanup labels.
+ * The returned conf must be released with eh_conf_uninit().
+ */
+struct eh_conf *
+eh_conf_init(void)
+{
+       struct eventmode_conf *em_conf = NULL;
+       struct eh_conf *conf = NULL;
+       unsigned int eth_core_id;
+       void *bitmap = NULL;
+       uint32_t nb_bytes;
+
+       /* Allocate memory for config */
+       conf = calloc(1, sizeof(struct eh_conf));
+       if (conf == NULL) {
+               EH_LOG_ERR("Failed to allocate memory for eventmode helper "
+                          "config");
+               return NULL;
+       }
+
+       /* Set default conf */
+
+       /* Packet transfer mode: poll */
+       conf->mode = EH_PKT_TRANSFER_MODE_POLL;
+
+       /* Keep all ethernet ports enabled by default */
+       conf->eth_portmask = -1;
+
+       /* Allocate memory for event mode params */
+       conf->mode_params = calloc(1, sizeof(struct eventmode_conf));
+       if (conf->mode_params == NULL) {
+               EH_LOG_ERR("Failed to allocate memory for event mode params");
+               goto free_conf;
+       }
+
+       /* Get eventmode conf */
+       em_conf = conf->mode_params;
+
+       /* Allocate and initialize bitmap for eth cores */
+       nb_bytes = rte_bitmap_get_memory_footprint(RTE_MAX_LCORE);
+       if (!nb_bytes) {
+               EH_LOG_ERR("Failed to get bitmap footprint");
+               goto free_em_conf;
+       }
+
+       bitmap = rte_zmalloc("event-helper-ethcore-bitmap", nb_bytes,
+                            RTE_CACHE_LINE_SIZE);
+       if (!bitmap) {
+               EH_LOG_ERR("Failed to allocate memory for eth cores bitmap\n");
+               goto free_em_conf;
+       }
+
+       em_conf->eth_core_mask = rte_bitmap_init(RTE_MAX_LCORE, bitmap,
+                                                nb_bytes);
+       if (!em_conf->eth_core_mask) {
+               EH_LOG_ERR("Failed to initialize bitmap");
+               goto free_bitmap;
+       }
+
+       /* Set schedule type as not set */
+       em_conf->ext_params.sched_type = SCHED_TYPE_NOT_SET;
+
+       /* Set two cores as eth cores for Rx & Tx */
+
+       /*
+        * Use first core other than master core as Rx core.
+        * NOTE(review): rte_get_next_lcore() returns RTE_MAX_LCORE when
+        * no further lcore exists; that value would then be passed to
+        * rte_bitmap_set() -- confirm enough lcores are guaranteed here.
+        */
+       eth_core_id = rte_get_next_lcore(0,     /* curr core */
+                                        1,     /* skip master core */
+                                        0      /* wrap */);
+
+       rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);
+
+       /* Use next core as Tx core */
+       eth_core_id = rte_get_next_lcore(eth_core_id,   /* curr core */
+                                        1,             /* skip master core */
+                                        0              /* wrap */);
+
+       rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);
+
+       return conf;
+
+free_bitmap:
+       rte_free(bitmap);
+free_em_conf:
+       free(em_conf);
+free_conf:
+       free(conf);
+       return NULL;
+}
+
+/*
+ * Release a configuration created by eh_conf_init(): the eth core
+ * bitmap (rte_zmalloc'd), the eventmode params and the conf itself.
+ * Safe to call with NULL or a partially initialized conf.
+ */
+void
+eh_conf_uninit(struct eh_conf *conf)
+{
+       struct eventmode_conf *em_conf;
+
+       if (conf == NULL || conf->mode_params == NULL)
+               return;
+
+       em_conf = conf->mode_params;
+
+       rte_free(em_conf->eth_core_mask);
+       free(em_conf);
+       free(conf);
+}
+
+/*
+ * Log the complete eventmode configuration (operating mode, event
+ * devices, Rx/Tx adapters and lcore links). No-op unless the packet
+ * transfer mode is EH_PKT_TRANSFER_MODE_EVENT.
+ */
+void
+eh_display_conf(struct eh_conf *conf)
+{
+       struct eventmode_conf *em_conf;
+
+       if (conf == NULL) {
+               EH_LOG_ERR("Invalid event helper configuration");
+               return;
+       }
+
+       if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
+               return;
+
+       if (conf->mode_params == NULL) {
+               EH_LOG_ERR("Invalid event mode parameters");
+               return;
+       }
+
+       /* Get eventmode conf */
+       em_conf = (struct eventmode_conf *)(conf->mode_params);
+
+       /* Display user exposed operating modes */
+       eh_display_operating_mode(em_conf);
+
+       /* Display event device conf */
+       eh_display_event_dev_conf(em_conf);
+
+       /* Display Rx adapter conf */
+       eh_display_rx_adapter_conf(em_conf);
+
+       /* Display Tx adapter conf */
+       eh_display_tx_adapter_conf(em_conf);
+
+       /* Display event-lcore link */
+       eh_display_link_conf(em_conf);
+}
+
 int32_t
 eh_devs_init(struct eh_conf *conf)
 {
@@ -849,6 +1552,9 @@ eh_devs_init(struct eh_conf *conf)
                return ret;
        }
 
+       /* Display the current configuration */
+       eh_display_conf(conf);
+
        /* Stop eth devices before setting up adapter */
        RTE_ETH_FOREACH_DEV(port_id) {
 
@@ -995,6 +1701,79 @@ eh_devs_uninit(struct eh_conf *conf)
        return 0;
 }
 
+/*
+ * Per-lcore entry point. Lcores set in the eth core bitmap run the
+ * adapter service loop (eh_start_worker_eth_core); every other lcore
+ * is matched against the registered app workers by capability and runs
+ * the matched worker thread. When a worker returns, or when no worker
+ * can be matched, the eth cores are signalled to stop.
+ */
+void
+eh_launch_worker(struct eh_conf *conf, struct eh_app_worker_params *app_wrkr,
+               uint8_t nb_wrkr_param)
+{
+       struct eh_app_worker_params *match_wrkr;
+       struct eh_event_link_info *links = NULL;
+       struct eventmode_conf *em_conf;
+       uint32_t lcore_id;
+       uint8_t nb_links;
+
+       if (conf == NULL) {
+               EH_LOG_ERR("Invalid event helper configuration");
+               return;
+       }
+
+       if (conf->mode_params == NULL) {
+               EH_LOG_ERR("Invalid event mode parameters");
+               return;
+       }
+
+       /* Get eventmode conf */
+       em_conf = conf->mode_params;
+
+       /* Get core ID */
+       lcore_id = rte_lcore_id();
+
+       /* Eth cores never reach the worker-matching logic below */
+       if (rte_bitmap_get(em_conf->eth_core_mask, lcore_id)) {
+               eh_start_worker_eth_core(em_conf, lcore_id);
+               return;
+       }
+
+       if (app_wrkr == NULL || nb_wrkr_param == 0) {
+               EH_LOG_ERR("Invalid args");
+               return;
+       }
+
+       /*
+        * This is a regular worker thread. The application registers
+        * multiple workers with various capabilities. Run worker
+        * based on the selected capabilities of the event
+        * device configured.
+        */
+
+       /* Get the first matching worker for the event device */
+       match_wrkr = eh_find_worker(lcore_id, conf, app_wrkr, nb_wrkr_param);
+       if (match_wrkr == NULL) {
+               EH_LOG_ERR("Failed to match worker registered for lcore %d",
+                          lcore_id);
+               goto clean_and_exit;
+       }
+
+       /* Verify sanity of the matched worker */
+       if (eh_verify_match_worker(match_wrkr) != 1) {
+               EH_LOG_ERR("Failed to validate the matched worker");
+               goto clean_and_exit;
+       }
+
+       /*
+        * Get worker links.
+        * NOTE(review): on lookup failure links stays NULL; assumes the
+        * worker thread tolerates (NULL, 0) arguments -- confirm.
+        */
+       nb_links = eh_get_event_lcore_links(lcore_id, conf, &links);
+
+       /* Launch the worker thread */
+       match_wrkr->worker_thread(links, nb_links);
+
+       /* Free links info memory */
+       free(links);
+
+clean_and_exit:
+
+       /* Flag eth_cores to stop, if started */
+       eh_stop_worker_eth_core();
+}
+
 uint8_t
 eh_get_tx_queue(struct eh_conf *conf, uint8_t eventdev_id)
 {