+/*
+ * Busy-poll loop for an eth (adapter service) core.
+ *
+ * Collects the service IDs of every Rx/Tx adapter configured to run on
+ * @lcore_id and runs those services in a loop until
+ * eh_stop_worker_eth_core() clears eth_core_running.
+ *
+ * Returns 0 on normal stop, or a negative errno if querying a service
+ * ID fails.
+ */
+static int32_t
+eh_start_worker_eth_core(struct eventmode_conf *conf, uint32_t lcore_id)
+{
+	uint32_t service_id[EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE];
+	struct rx_adapter_conf *rx_adapter;
+	struct tx_adapter_conf *tx_adapter;
+	int service_count = 0;
+	int adapter_id;
+	int32_t ret;
+	int i;
+
+	EH_LOG_INFO("Entering eth_core processing on lcore %u", lcore_id);
+
+	/*
+	 * Parse adapter config to check which of all Rx adapters need
+	 * to be handled by this core.
+	 */
+	for (i = 0; i < conf->nb_rx_adapter; i++) {
+		/*
+		 * Check remaining capacity of service_id[]. ">=" is
+		 * required here: with ">" the code could write one entry
+		 * past the end of the array.
+		 */
+		if (service_count >= EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE) {
+			EH_LOG_ERR(
+			      "Exceeded the max allowed adapters per rx core");
+			break;
+		}
+
+		rx_adapter = &(conf->rx_adapter[i]);
+		if (rx_adapter->rx_core_id != lcore_id)
+			continue;
+
+		/* Adapter is handled by this core */
+		adapter_id = rx_adapter->adapter_id;
+
+		/* Get the service ID for the adapter */
+		ret = rte_event_eth_rx_adapter_service_id_get(adapter_id,
+				&(service_id[service_count]));
+
+		/*
+		 * -ESRCH: the adapter has no service function (internal
+		 * port mode). service_id[service_count] was not written,
+		 * so it must not be counted or run.
+		 */
+		if (ret == -ESRCH)
+			continue;
+
+		if (ret < 0) {
+			EH_LOG_ERR(
+				"Failed to get service id used by rx adapter");
+			return ret;
+		}
+
+		/* Update service count */
+		service_count++;
+	}
+
+	/*
+	 * Parse adapter config to see which of all Tx adapters need
+	 * to be handled by this core.
+	 */
+	for (i = 0; i < conf->nb_tx_adapter; i++) {
+		/*
+		 * Check remaining capacity of service_id[] (shared with
+		 * the Rx entries above). NOTE(review): the array is sized
+		 * by the RX macro; this assumes
+		 * EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE does not exceed
+		 * EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE — TODO confirm.
+		 */
+		if (service_count >= EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE) {
+			EH_LOG_ERR(
+			      "Exceeded the max allowed adapters per tx core");
+			break;
+		}
+
+		tx_adapter = &conf->tx_adapter[i];
+		if (tx_adapter->tx_core_id != lcore_id)
+			continue;
+
+		/* Adapter is handled by this core */
+		adapter_id = tx_adapter->adapter_id;
+
+		/* Get the service ID for the adapter */
+		ret = rte_event_eth_tx_adapter_service_id_get(adapter_id,
+				&(service_id[service_count]));
+
+		/* See the Rx loop: -ESRCH means no service to run */
+		if (ret == -ESRCH)
+			continue;
+
+		if (ret < 0) {
+			EH_LOG_ERR(
+				"Failed to get service id used by tx adapter");
+			return ret;
+		}
+
+		/* Update service count */
+		service_count++;
+	}
+
+	eth_core_running = true;
+
+	/* Run the collected adapter services until asked to stop */
+	while (eth_core_running) {
+		for (i = 0; i < service_count; i++) {
+			/* Initiate adapter service */
+			rte_service_run_iter_on_app_lcore(service_id[i], 0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Request all eth (adapter service) cores to exit their run loops by
+ * clearing the shared eth_core_running flag. Always returns 0.
+ */
+static int32_t
+eh_stop_worker_eth_core(void)
+{
+	if (!eth_core_running)
+		return 0;
+
+	EH_LOG_INFO("Stopping eth cores");
+	eth_core_running = false;
+	return 0;
+}
+
+/*
+ * Select the application worker whose capability flags match what the
+ * event device tied to @lcore_id supports (currently: burst vs
+ * non-burst Rx).
+ *
+ * Returns the matching entry from @app_wrkrs, or NULL when the lcore
+ * has no event link or no worker's capabilities match.
+ */
+static struct eh_app_worker_params *
+eh_find_worker(uint32_t lcore_id, struct eh_conf *conf,
+		struct eh_app_worker_params *app_wrkrs, uint8_t nb_wrkr_param)
+{
+	struct eh_app_worker_params curr_conf = { {{0} }, NULL};
+	struct eh_event_link_info *link = NULL;
+	struct eh_app_worker_params *tmp_wrkr;
+	struct eventmode_conf *em_conf;
+	uint8_t eventdev_id;
+	int i;
+
+	/* Get eventmode config */
+	em_conf = conf->mode_params;
+
+	/*
+	 * Use event device from the first lcore-event link.
+	 *
+	 * Assumption: All lcore-event links tied to a core are using the
+	 * same event device. In other words, one core would be polling on
+	 * queues of a single event device only.
+	 */
+
+	/*
+	 * Get a link for this lcore. Assign only on a match: the previous
+	 * code left "link" pointing at the last entry when nothing
+	 * matched, so the NULL check below never fired for a non-empty
+	 * link list and a wrong link was used.
+	 */
+	for (i = 0; i < em_conf->nb_link; i++) {
+		if (em_conf->link[i].lcore_id == lcore_id) {
+			link = &(em_conf->link[i]);
+			break;
+		}
+	}
+
+	if (link == NULL) {
+		/* %u: lcore_id is uint32_t */
+		EH_LOG_ERR("No valid link found for lcore %u", lcore_id);
+		return NULL;
+	}
+
+	/* Get event dev ID */
+	eventdev_id = link->eventdev_id;
+
+	/* Populate the curr_conf with the capabilities */
+
+	/* Check for burst mode */
+	if (eh_dev_has_burst_mode(eventdev_id))
+		curr_conf.cap.burst = EH_RX_TYPE_BURST;
+	else
+		curr_conf.cap.burst = EH_RX_TYPE_NON_BURST;
+
+	/* Parse the passed list and see if we have matching capabilities */
+
+	/* Initialize the pointer used to traverse the list */
+	tmp_wrkr = app_wrkrs;
+
+	for (i = 0; i < nb_wrkr_param; i++, tmp_wrkr++) {
+
+		/* Skip this if capabilities are not matching */
+		if (tmp_wrkr->cap.u64 != curr_conf.cap.u64)
+			continue;
+
+		/* If the checks pass, we have a match */
+		return tmp_wrkr;
+	}
+
+	return NULL;
+}
+
+/*
+ * Check that a matched worker entry actually has a worker thread
+ * registered. Returns 1 when the entry is usable, 0 otherwise.
+ */
+static int
+eh_verify_match_worker(struct eh_app_worker_params *match_wrkr)
+{
+	if (match_wrkr->worker_thread != NULL)
+		return 1;
+
+	/* No thread was registered for the matched capabilities */
+	EH_LOG_ERR("No worker registered");
+	return 0;
+}
+
+/*
+ * Copy all lcore-event links configured for @lcore_id into a freshly
+ * allocated array handed back through @links. The caller owns the
+ * returned buffer and must free() it.
+ *
+ * Returns the number of cached links; 0 on invalid arguments,
+ * allocation failure, or when the lcore has no links (*links is set to
+ * NULL in those cases where @links is valid).
+ *
+ * NOTE(review): the return type is uint8_t, so the previous
+ * "return -EINVAL" paths actually delivered a large positive link
+ * count (-22 truncated to 234) to the caller; errors now report 0.
+ */
+static uint8_t
+eh_get_event_lcore_links(uint32_t lcore_id, struct eh_conf *conf,
+		struct eh_event_link_info **links)
+{
+	struct eh_event_link_info *link_cache;
+	struct eventmode_conf *em_conf = NULL;
+	struct eh_event_link_info *link;
+	uint8_t lcore_nb_link = 0;
+	size_t single_link_size;
+	size_t cache_size;
+	int index = 0;
+	int i;
+
+	if (conf == NULL || links == NULL) {
+		EH_LOG_ERR("Invalid args");
+		return 0;
+	}
+
+	/* Get eventmode conf */
+	em_conf = conf->mode_params;
+
+	if (em_conf == NULL) {
+		EH_LOG_ERR("Invalid event mode parameters");
+		*links = NULL;
+		return 0;
+	}
+
+	/* Count the links registered for this lcore */
+	for (i = 0; i < em_conf->nb_link; i++) {
+		link = &(em_conf->link[i]);
+		if (link->lcore_id == lcore_id)
+			lcore_nb_link++;
+	}
+
+	if (lcore_nb_link == 0) {
+		/* Nothing to cache for this lcore */
+		*links = NULL;
+		return 0;
+	}
+
+	/* Compute size of one entry to be copied */
+	single_link_size = sizeof(struct eh_event_link_info);
+
+	/* Compute size of the buffer required */
+	cache_size = lcore_nb_link * single_link_size;
+
+	/* Allocate the (zero-initialized) cache buffer */
+	link_cache = calloc(1, cache_size);
+	if (link_cache == NULL) {
+		/* Previously unchecked: memcpy below would deref NULL */
+		EH_LOG_ERR("Failed to allocate memory for link cache");
+		*links = NULL;
+		return 0;
+	}
+
+	/* Cache every link registered for this lcore */
+	for (i = 0; i < em_conf->nb_link; i++) {
+		link = &(em_conf->link[i]);
+		if (link->lcore_id == lcore_id) {
+			/* Cache the link */
+			memcpy(&link_cache[index], link, single_link_size);
+			index++;
+		}
+	}
+
+	/* Update the links for application to use the cached links */
+	*links = link_cache;
+
+	/* Return the number of cached links */
+	return lcore_nb_link;
+}
+