/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */
4 #include <rte_bitmap.h>
5 #include <rte_ethdev.h>
6 #include <rte_eventdev.h>
7 #include <rte_event_eth_rx_adapter.h>
8 #include <rte_event_eth_tx_adapter.h>
9 #include <rte_malloc.h>
12 #include "event_helper.h"
14 static volatile bool eth_core_running;
17 eh_get_enabled_cores(struct rte_bitmap *eth_core_mask)
21 RTE_LCORE_FOREACH(i) {
22 /* Check if this core is enabled in core mask*/
23 if (rte_bitmap_get(eth_core_mask, i)) {
24 /* Found enabled core */
31 static inline unsigned int
32 eh_get_next_eth_core(struct eventmode_conf *em_conf)
34 static unsigned int prev_core = -1;
35 unsigned int next_core;
38 * Make sure we have at least one eth core running, else the following
39 * logic would lead to an infinite loop.
41 if (eh_get_enabled_cores(em_conf->eth_core_mask) == 0) {
42 EH_LOG_ERR("No enabled eth core found");
46 /* Only some cores are marked as eth cores, skip others */
48 /* Get the next core */
49 next_core = rte_get_next_lcore(prev_core, 0, 1);
51 /* Check if we have reached max lcores */
52 if (next_core == RTE_MAX_LCORE)
55 /* Update prev_core */
56 prev_core = next_core;
57 } while (!(rte_bitmap_get(em_conf->eth_core_mask, next_core)));
62 static inline unsigned int
63 eh_get_next_active_core(struct eventmode_conf *em_conf, unsigned int prev_core)
65 unsigned int next_core;
67 /* Get next active core skipping cores reserved as eth cores */
69 /* Get the next core */
70 next_core = rte_get_next_lcore(prev_core, 0, 0);
72 /* Check if we have reached max lcores */
73 if (next_core == RTE_MAX_LCORE)
76 prev_core = next_core;
77 } while (rte_bitmap_get(em_conf->eth_core_mask, next_core));
82 static struct eventdev_params *
83 eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id)
87 for (i = 0; i < em_conf->nb_eventdev; i++) {
88 if (em_conf->eventdev_config[i].eventdev_id == eventdev_id)
93 if (i == em_conf->nb_eventdev)
96 return &(em_conf->eventdev_config[i]);
100 eh_dev_has_rx_internal_port(uint8_t eventdev_id)
105 RTE_ETH_FOREACH_DEV(j) {
108 rte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps);
109 if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
116 eh_dev_has_tx_internal_port(uint8_t eventdev_id)
121 RTE_ETH_FOREACH_DEV(j) {
124 rte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps);
125 if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
132 eh_dev_has_burst_mode(uint8_t dev_id)
134 struct rte_event_dev_info dev_info;
136 rte_event_dev_info_get(dev_id, &dev_info);
137 return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
142 eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
144 int lcore_count, nb_eventdev, nb_eth_dev, ret;
145 struct eventdev_params *eventdev_config;
146 struct rte_event_dev_info dev_info;
148 /* Get the number of event devices */
149 nb_eventdev = rte_event_dev_count();
150 if (nb_eventdev == 0) {
151 EH_LOG_ERR("No event devices detected");
155 if (nb_eventdev != 1) {
156 EH_LOG_ERR("Event mode does not support multiple event devices. "
157 "Please provide only one event device.");
161 /* Get the number of eth devs */
162 nb_eth_dev = rte_eth_dev_count_avail();
163 if (nb_eth_dev == 0) {
164 EH_LOG_ERR("No eth devices detected");
168 /* Get the number of lcores */
169 lcore_count = rte_lcore_count();
171 /* Read event device info */
172 ret = rte_event_dev_info_get(0, &dev_info);
174 EH_LOG_ERR("Failed to read event device info %d", ret);
178 /* Check if enough ports are available */
179 if (dev_info.max_event_ports < 2) {
180 EH_LOG_ERR("Not enough event ports available");
184 /* Get the first event dev conf */
185 eventdev_config = &(em_conf->eventdev_config[0]);
187 /* Save number of queues & ports available */
188 eventdev_config->eventdev_id = 0;
189 eventdev_config->nb_eventqueue = dev_info.max_event_queues;
190 eventdev_config->nb_eventport = dev_info.max_event_ports;
191 eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
193 /* Check if there are more queues than required */
194 if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
195 /* One queue is reserved for Tx */
196 eventdev_config->nb_eventqueue = nb_eth_dev + 1;
199 /* Check if there are more ports than required */
200 if (eventdev_config->nb_eventport > lcore_count) {
201 /* One port per lcore is enough */
202 eventdev_config->nb_eventport = lcore_count;
205 /* Update the number of event devices */
206 em_conf->nb_eventdev++;
212 eh_do_capability_check(struct eventmode_conf *em_conf)
214 struct eventdev_params *eventdev_config;
215 int all_internal_ports = 1;
216 uint32_t eventdev_id;
219 for (i = 0; i < em_conf->nb_eventdev; i++) {
221 /* Get the event dev conf */
222 eventdev_config = &(em_conf->eventdev_config[i]);
223 eventdev_id = eventdev_config->eventdev_id;
225 /* Check if event device has internal port for Rx & Tx */
226 if (eh_dev_has_rx_internal_port(eventdev_id) &&
227 eh_dev_has_tx_internal_port(eventdev_id)) {
228 eventdev_config->all_internal_ports = 1;
230 all_internal_ports = 0;
235 * If Rx & Tx internal ports are supported by all event devices then
236 * eth cores won't be required. Override the eth core mask requested
237 * and decrement number of event queues by one as it won't be needed
240 if (all_internal_ports) {
241 rte_bitmap_reset(em_conf->eth_core_mask);
242 for (i = 0; i < em_conf->nb_eventdev; i++)
243 em_conf->eventdev_config[i].nb_eventqueue--;
248 eh_set_default_conf_link(struct eventmode_conf *em_conf)
250 struct eventdev_params *eventdev_config;
251 struct eh_event_link_info *link;
252 unsigned int lcore_id = -1;
256 * Create a 1:1 mapping from event ports to cores. If the number
257 * of event ports is lesser than the cores, some cores won't
258 * execute worker. If there are more event ports, then some ports
264 * The event queue-port mapping is done according to the link. Since
265 * we are falling back to the default link config, enabling
266 * "all_ev_queue_to_ev_port" mode flag. This will map all queues
269 em_conf->ext_params.all_ev_queue_to_ev_port = 1;
271 /* Get first event dev conf */
272 eventdev_config = &(em_conf->eventdev_config[0]);
274 /* Loop through the ports */
275 for (i = 0; i < eventdev_config->nb_eventport; i++) {
277 /* Get next active core id */
278 lcore_id = eh_get_next_active_core(em_conf,
281 if (lcore_id == RTE_MAX_LCORE) {
282 /* Reached max cores */
286 /* Save the current combination as one link */
289 link_index = em_conf->nb_link;
291 /* Get the corresponding link */
292 link = &(em_conf->link[link_index]);
295 link->eventdev_id = eventdev_config->eventdev_id;
296 link->event_port_id = i;
297 link->lcore_id = lcore_id;
300 * Don't set eventq_id as by default all queues
301 * need to be mapped to the port, which is controlled
302 * by the operating mode.
305 /* Update number of links */
313 eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
315 struct rx_adapter_connection_info *conn;
316 struct eventdev_params *eventdev_config;
317 struct rx_adapter_conf *adapter;
318 bool rx_internal_port = true;
319 bool single_ev_queue = false;
328 /* Create one adapter with eth queues mapped to event queue(s) */
330 if (em_conf->nb_eventdev == 0) {
331 EH_LOG_ERR("No event devs registered");
335 /* Get the number of eth devs */
336 nb_eth_dev = rte_eth_dev_count_avail();
338 /* Use the first event dev */
339 eventdev_config = &(em_conf->eventdev_config[0]);
341 /* Get eventdev ID */
342 eventdev_id = eventdev_config->eventdev_id;
345 /* Get adapter conf */
346 adapter = &(em_conf->rx_adapter[adapter_id]);
348 /* Set adapter conf */
349 adapter->eventdev_id = eventdev_id;
350 adapter->adapter_id = adapter_id;
353 * If event device does not have internal ports for passing
354 * packets then reserved one queue for Tx path
356 nb_eventqueue = eventdev_config->all_internal_ports ?
357 eventdev_config->nb_eventqueue :
358 eventdev_config->nb_eventqueue - 1;
361 * Map all queues of eth device (port) to an event queue. If there
362 * are more event queues than eth ports then create 1:1 mapping.
363 * Otherwise map all eth ports to a single event queue.
365 if (nb_eth_dev > nb_eventqueue)
366 single_ev_queue = true;
368 for (i = 0; i < nb_eth_dev; i++) {
370 /* Use only the ports enabled */
371 if ((em_conf->eth_portmask & (1 << i)) == 0)
374 /* Get the connection id */
375 conn_id = adapter->nb_connections;
377 /* Get the connection */
378 conn = &(adapter->conn[conn_id]);
380 /* Set mapping between eth ports & event queues*/
382 conn->eventq_id = single_ev_queue ? 0 : i;
384 /* Add all eth queues eth port to event queue */
385 conn->ethdev_rx_qid = -1;
387 /* Get Rx adapter capabilities */
388 rte_event_eth_rx_adapter_caps_get(eventdev_id, i, &caps);
389 if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
390 rx_internal_port = false;
392 /* Update no of connections */
393 adapter->nb_connections++;
397 if (rx_internal_port) {
398 /* Rx core is not required */
399 adapter->rx_core_id = -1;
401 /* Rx core is required */
402 adapter->rx_core_id = eh_get_next_eth_core(em_conf);
405 /* We have setup one adapter */
406 em_conf->nb_rx_adapter = 1;
412 eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
414 struct tx_adapter_connection_info *conn;
415 struct eventdev_params *eventdev_config;
416 struct tx_adapter_conf *tx_adapter;
417 bool tx_internal_port = true;
426 * Create one Tx adapter with all eth queues mapped to event queues
430 if (em_conf->nb_eventdev == 0) {
431 EH_LOG_ERR("No event devs registered");
435 /* Get the number of eth devs */
436 nb_eth_dev = rte_eth_dev_count_avail();
438 /* Use the first event dev */
439 eventdev_config = &(em_conf->eventdev_config[0]);
441 /* Get eventdev ID */
442 eventdev_id = eventdev_config->eventdev_id;
445 /* Get adapter conf */
446 tx_adapter = &(em_conf->tx_adapter[adapter_id]);
448 /* Set adapter conf */
449 tx_adapter->eventdev_id = eventdev_id;
450 tx_adapter->adapter_id = adapter_id;
453 * Map all Tx queues of the eth device (port) to the event device.
456 /* Set defaults for connections */
459 * One eth device (port) is one connection. Map all Tx queues
460 * of the device to the Tx adapter.
463 for (i = 0; i < nb_eth_dev; i++) {
465 /* Use only the ports enabled */
466 if ((em_conf->eth_portmask & (1 << i)) == 0)
469 /* Get the connection id */
470 conn_id = tx_adapter->nb_connections;
472 /* Get the connection */
473 conn = &(tx_adapter->conn[conn_id]);
475 /* Add ethdev to connections */
478 /* Add all eth tx queues to adapter */
479 conn->ethdev_tx_qid = -1;
481 /* Get Tx adapter capabilities */
482 rte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);
483 if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
484 tx_internal_port = false;
486 /* Update no of connections */
487 tx_adapter->nb_connections++;
490 if (tx_internal_port) {
491 /* Tx core is not required */
492 tx_adapter->tx_core_id = -1;
494 /* Tx core is required */
495 tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);
498 * Use one event queue per adapter for submitting packets
499 * for Tx. Reserving the last queue available
501 /* Queue numbers start at 0 */
502 tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;
505 /* We have setup one adapter */
506 em_conf->nb_tx_adapter = 1;
511 eh_validate_conf(struct eventmode_conf *em_conf)
516 * Check if event devs are specified. Else probe the event devices
517 * and initialize the config with all ports & queues available
519 if (em_conf->nb_eventdev == 0) {
520 ret = eh_set_default_conf_eventdev(em_conf);
525 /* Perform capability check for the selected event devices */
526 eh_do_capability_check(em_conf);
529 * Check if links are specified. Else generate a default config for
530 * the event ports used.
532 if (em_conf->nb_link == 0) {
533 ret = eh_set_default_conf_link(em_conf);
539 * Check if rx adapters are specified. Else generate a default config
540 * with one rx adapter and all eth queues - event queue mapped.
542 if (em_conf->nb_rx_adapter == 0) {
543 ret = eh_set_default_conf_rx_adapter(em_conf);
549 * Check if tx adapters are specified. Else generate a default config
550 * with one tx adapter.
552 if (em_conf->nb_tx_adapter == 0) {
553 ret = eh_set_default_conf_tx_adapter(em_conf);
562 eh_initialize_eventdev(struct eventmode_conf *em_conf)
564 struct rte_event_queue_conf eventq_conf = {0};
565 struct rte_event_dev_info evdev_default_conf;
566 struct rte_event_dev_config eventdev_conf;
567 struct eventdev_params *eventdev_config;
568 int nb_eventdev = em_conf->nb_eventdev;
569 struct eh_event_link_info *link;
570 uint8_t *queue = NULL;
576 for (i = 0; i < nb_eventdev; i++) {
578 /* Get eventdev config */
579 eventdev_config = &(em_conf->eventdev_config[i]);
581 /* Get event dev ID */
582 eventdev_id = eventdev_config->eventdev_id;
584 /* Get the number of queues */
585 nb_eventqueue = eventdev_config->nb_eventqueue;
587 /* Reset the default conf */
588 memset(&evdev_default_conf, 0,
589 sizeof(struct rte_event_dev_info));
591 /* Get default conf of eventdev */
592 ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
595 "Error in getting event device info[devID:%d]",
600 memset(&eventdev_conf, 0, sizeof(struct rte_event_dev_config));
601 eventdev_conf.nb_events_limit =
602 evdev_default_conf.max_num_events;
603 eventdev_conf.nb_event_queues = nb_eventqueue;
604 eventdev_conf.nb_event_ports =
605 eventdev_config->nb_eventport;
606 eventdev_conf.nb_event_queue_flows =
607 evdev_default_conf.max_event_queue_flows;
608 eventdev_conf.nb_event_port_dequeue_depth =
609 evdev_default_conf.max_event_port_dequeue_depth;
610 eventdev_conf.nb_event_port_enqueue_depth =
611 evdev_default_conf.max_event_port_enqueue_depth;
613 /* Configure event device */
614 ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
616 EH_LOG_ERR("Error in configuring event device");
620 /* Configure event queues */
621 for (j = 0; j < nb_eventqueue; j++) {
623 memset(&eventq_conf, 0,
624 sizeof(struct rte_event_queue_conf));
626 /* Per event dev queues can be ATQ or SINGLE LINK */
627 eventq_conf.event_queue_cfg =
628 eventdev_config->ev_queue_mode;
630 * All queues need to be set with sched_type as
631 * schedule type for the application stage. One
632 * queue would be reserved for the final eth tx
633 * stage if event device does not have internal
634 * ports. This will be an atomic queue.
636 if (!eventdev_config->all_internal_ports &&
637 j == nb_eventqueue-1) {
638 eventq_conf.schedule_type =
639 RTE_SCHED_TYPE_ATOMIC;
641 eventq_conf.schedule_type =
642 em_conf->ext_params.sched_type;
645 /* Set max atomic flows to 1024 */
646 eventq_conf.nb_atomic_flows = 1024;
647 eventq_conf.nb_atomic_order_sequences = 1024;
649 /* Setup the queue */
650 ret = rte_event_queue_setup(eventdev_id, j,
653 EH_LOG_ERR("Failed to setup event queue %d",
659 /* Configure event ports */
660 for (j = 0; j < eventdev_config->nb_eventport; j++) {
661 ret = rte_event_port_setup(eventdev_id, j, NULL);
663 EH_LOG_ERR("Failed to setup event port %d",
670 /* Make event queue - event port link */
671 for (j = 0; j < em_conf->nb_link; j++) {
674 link = &(em_conf->link[j]);
676 /* Get event dev ID */
677 eventdev_id = link->eventdev_id;
680 * If "all_ev_queue_to_ev_port" params flag is selected, all
681 * queues need to be mapped to the port.
683 if (em_conf->ext_params.all_ev_queue_to_ev_port)
686 queue = &(link->eventq_id);
688 /* Link queue to port */
689 ret = rte_event_port_link(eventdev_id, link->event_port_id,
692 EH_LOG_ERR("Failed to link event port %d", ret);
697 /* Start event devices */
698 for (i = 0; i < nb_eventdev; i++) {
700 /* Get eventdev config */
701 eventdev_config = &(em_conf->eventdev_config[i]);
703 ret = rte_event_dev_start(eventdev_config->eventdev_id);
705 EH_LOG_ERR("Failed to start event device %d, %d",
714 eh_rx_adapter_configure(struct eventmode_conf *em_conf,
715 struct rx_adapter_conf *adapter)
717 struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
718 struct rte_event_dev_info evdev_default_conf = {0};
719 struct rte_event_port_conf port_conf = {0};
720 struct rx_adapter_connection_info *conn;
726 /* Get event dev ID */
727 eventdev_id = adapter->eventdev_id;
729 /* Get default configuration of event dev */
730 ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
732 EH_LOG_ERR("Failed to get event dev info %d", ret);
736 /* Setup port conf */
737 port_conf.new_event_threshold = 1200;
738 port_conf.dequeue_depth =
739 evdev_default_conf.max_event_port_dequeue_depth;
740 port_conf.enqueue_depth =
741 evdev_default_conf.max_event_port_enqueue_depth;
743 /* Create Rx adapter */
744 ret = rte_event_eth_rx_adapter_create(adapter->adapter_id,
745 adapter->eventdev_id, &port_conf);
747 EH_LOG_ERR("Failed to create rx adapter %d", ret);
751 /* Setup various connections in the adapter */
752 for (j = 0; j < adapter->nb_connections; j++) {
754 conn = &(adapter->conn[j]);
756 /* Setup queue conf */
757 queue_conf.ev.queue_id = conn->eventq_id;
758 queue_conf.ev.sched_type = em_conf->ext_params.sched_type;
759 queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;
761 /* Add queue to the adapter */
762 ret = rte_event_eth_rx_adapter_queue_add(adapter->adapter_id,
763 conn->ethdev_id, conn->ethdev_rx_qid,
766 EH_LOG_ERR("Failed to add eth queue to rx adapter %d",
772 /* Get the service ID used by rx adapter */
773 ret = rte_event_eth_rx_adapter_service_id_get(adapter->adapter_id,
775 if (ret != -ESRCH && ret < 0) {
776 EH_LOG_ERR("Failed to get service id used by rx adapter %d",
781 rte_service_set_runstate_mapped_check(service_id, 0);
784 ret = rte_event_eth_rx_adapter_start(adapter->adapter_id);
786 EH_LOG_ERR("Failed to start rx adapter %d", ret);
794 eh_initialize_rx_adapter(struct eventmode_conf *em_conf)
796 struct rx_adapter_conf *adapter;
799 /* Configure rx adapters */
800 for (i = 0; i < em_conf->nb_rx_adapter; i++) {
801 adapter = &(em_conf->rx_adapter[i]);
802 ret = eh_rx_adapter_configure(em_conf, adapter);
804 EH_LOG_ERR("Failed to configure rx adapter %d", ret);
812 eh_start_worker_eth_core(struct eventmode_conf *conf, uint32_t lcore_id)
814 uint32_t service_id[EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE];
815 struct rx_adapter_conf *rx_adapter;
816 struct tx_adapter_conf *tx_adapter;
817 int service_count = 0;
822 EH_LOG_INFO("Entering eth_core processing on lcore %u", lcore_id);
825 * Parse adapter config to check which of all Rx adapters need
826 * to be handled by this core.
828 for (i = 0; i < conf->nb_rx_adapter; i++) {
829 /* Check if we have exceeded the max allowed */
830 if (service_count > EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE) {
832 "Exceeded the max allowed adapters per rx core");
836 rx_adapter = &(conf->rx_adapter[i]);
837 if (rx_adapter->rx_core_id != lcore_id)
840 /* Adapter is handled by this core */
841 adapter_id = rx_adapter->adapter_id;
843 /* Get the service ID for the adapters */
844 ret = rte_event_eth_rx_adapter_service_id_get(adapter_id,
845 &(service_id[service_count]));
847 if (ret != -ESRCH && ret < 0) {
849 "Failed to get service id used by rx adapter");
853 /* Update service count */
858 * Parse adapter config to see which of all Tx adapters need
859 * to be handled by this core.
861 for (i = 0; i < conf->nb_tx_adapter; i++) {
862 /* Check if we have exceeded the max allowed */
863 if (service_count > EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE) {
865 "Exceeded the max allowed adapters per tx core");
869 tx_adapter = &conf->tx_adapter[i];
870 if (tx_adapter->tx_core_id != lcore_id)
873 /* Adapter is handled by this core */
874 adapter_id = tx_adapter->adapter_id;
876 /* Get the service ID for the adapters */
877 ret = rte_event_eth_tx_adapter_service_id_get(adapter_id,
878 &(service_id[service_count]));
880 if (ret != -ESRCH && ret < 0) {
882 "Failed to get service id used by tx adapter");
886 /* Update service count */
890 eth_core_running = true;
892 while (eth_core_running) {
893 for (i = 0; i < service_count; i++) {
894 /* Initiate adapter service */
895 rte_service_run_iter_on_app_lcore(service_id[i], 0);
903 eh_stop_worker_eth_core(void)
905 if (eth_core_running) {
906 EH_LOG_INFO("Stopping eth cores");
907 eth_core_running = false;
912 static struct eh_app_worker_params *
913 eh_find_worker(uint32_t lcore_id, struct eh_conf *conf,
914 struct eh_app_worker_params *app_wrkrs, uint8_t nb_wrkr_param)
916 struct eh_app_worker_params curr_conf = { {{0} }, NULL};
917 struct eh_event_link_info *link = NULL;
918 struct eh_app_worker_params *tmp_wrkr;
919 struct eventmode_conf *em_conf;
923 /* Get eventmode config */
924 em_conf = conf->mode_params;
927 * Use event device from the first lcore-event link.
929 * Assumption: All lcore-event links tied to a core are using the
930 * same event device. In other words, one core would be polling on
931 * queues of a single event device only.
934 /* Get a link for this lcore */
935 for (i = 0; i < em_conf->nb_link; i++) {
936 link = &(em_conf->link[i]);
937 if (link->lcore_id == lcore_id)
942 EH_LOG_ERR("No valid link found for lcore %d", lcore_id);
946 /* Get event dev ID */
947 eventdev_id = link->eventdev_id;
949 /* Populate the curr_conf with the capabilities */
951 /* Check for Tx internal port */
952 if (eh_dev_has_tx_internal_port(eventdev_id))
953 curr_conf.cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
955 curr_conf.cap.tx_internal_port = EH_TX_TYPE_NO_INTERNAL_PORT;
957 /* Check for burst mode */
958 if (eh_dev_has_burst_mode(eventdev_id))
959 curr_conf.cap.burst = EH_RX_TYPE_BURST;
961 curr_conf.cap.burst = EH_RX_TYPE_NON_BURST;
963 curr_conf.cap.ipsec_mode = conf->ipsec_mode;
965 /* Parse the passed list and see if we have matching capabilities */
967 /* Initialize the pointer used to traverse the list */
968 tmp_wrkr = app_wrkrs;
970 for (i = 0; i < nb_wrkr_param; i++, tmp_wrkr++) {
972 /* Skip this if capabilities are not matching */
973 if (tmp_wrkr->cap.u64 != curr_conf.cap.u64)
976 /* If the checks pass, we have a match */
984 eh_verify_match_worker(struct eh_app_worker_params *match_wrkr)
986 /* Verify registered worker */
987 if (match_wrkr->worker_thread == NULL) {
988 EH_LOG_ERR("No worker registered");
997 eh_get_event_lcore_links(uint32_t lcore_id, struct eh_conf *conf,
998 struct eh_event_link_info **links)
1000 struct eh_event_link_info *link_cache;
1001 struct eventmode_conf *em_conf = NULL;
1002 struct eh_event_link_info *link;
1003 uint8_t lcore_nb_link = 0;
1004 size_t single_link_size;
1009 if (conf == NULL || links == NULL) {
1010 EH_LOG_ERR("Invalid args");
1014 /* Get eventmode conf */
1015 em_conf = conf->mode_params;
1017 if (em_conf == NULL) {
1018 EH_LOG_ERR("Invalid event mode parameters");
1022 /* Get the number of links registered */
1023 for (i = 0; i < em_conf->nb_link; i++) {
1026 link = &(em_conf->link[i]);
1028 /* Check if we have link intended for this lcore */
1029 if (link->lcore_id == lcore_id) {
1031 /* Update the number of links for this core */
1037 /* Compute size of one entry to be copied */
1038 single_link_size = sizeof(struct eh_event_link_info);
1040 /* Compute size of the buffer required */
1041 cache_size = lcore_nb_link * sizeof(struct eh_event_link_info);
1043 /* Compute size of the buffer required */
1044 link_cache = calloc(1, cache_size);
1046 /* Get the number of links registered */
1047 for (i = 0; i < em_conf->nb_link; i++) {
1050 link = &(em_conf->link[i]);
1052 /* Check if we have link intended for this lcore */
1053 if (link->lcore_id == lcore_id) {
1055 /* Cache the link */
1056 memcpy(&link_cache[index], link, single_link_size);
1063 /* Update the links for application to use the cached links */
1064 *links = link_cache;
1066 /* Return the number of cached links */
1067 return lcore_nb_link;
1071 eh_tx_adapter_configure(struct eventmode_conf *em_conf,
1072 struct tx_adapter_conf *adapter)
1074 struct rte_event_dev_info evdev_default_conf = {0};
1075 struct rte_event_port_conf port_conf = {0};
1076 struct tx_adapter_connection_info *conn;
1077 struct eventdev_params *eventdev_config;
1078 uint8_t tx_port_id = 0;
1079 uint8_t eventdev_id;
1080 uint32_t service_id;
1083 /* Get event dev ID */
1084 eventdev_id = adapter->eventdev_id;
1086 /* Get event device conf */
1087 eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);
1089 /* Create Tx adapter */
1091 /* Get default configuration of event dev */
1092 ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
1094 EH_LOG_ERR("Failed to get event dev info %d", ret);
1098 /* Setup port conf */
1099 port_conf.new_event_threshold =
1100 evdev_default_conf.max_num_events;
1101 port_conf.dequeue_depth =
1102 evdev_default_conf.max_event_port_dequeue_depth;
1103 port_conf.enqueue_depth =
1104 evdev_default_conf.max_event_port_enqueue_depth;
1106 /* Create adapter */
1107 ret = rte_event_eth_tx_adapter_create(adapter->adapter_id,
1108 adapter->eventdev_id, &port_conf);
1110 EH_LOG_ERR("Failed to create tx adapter %d", ret);
1114 /* Setup various connections in the adapter */
1115 for (j = 0; j < adapter->nb_connections; j++) {
1117 /* Get connection */
1118 conn = &(adapter->conn[j]);
1120 /* Add queue to the adapter */
1121 ret = rte_event_eth_tx_adapter_queue_add(adapter->adapter_id,
1122 conn->ethdev_id, conn->ethdev_tx_qid);
1124 EH_LOG_ERR("Failed to add eth queue to tx adapter %d",
1131 * Check if Tx core is assigned. If Tx core is not assigned then
1132 * the adapter has internal port for submitting Tx packets and
1133 * Tx event queue & port setup is not required
1135 if (adapter->tx_core_id == (uint32_t) (-1)) {
1136 /* Internal port is present */
1137 goto skip_tx_queue_port_setup;
1140 /* Setup Tx queue & port */
1142 /* Get event port used by the adapter */
1143 ret = rte_event_eth_tx_adapter_event_port_get(
1144 adapter->adapter_id, &tx_port_id);
1146 EH_LOG_ERR("Failed to get tx adapter port id %d", ret);
1151 * Tx event queue is reserved for Tx adapter. Unlink this queue
1152 * from all other ports
1155 for (j = 0; j < eventdev_config->nb_eventport; j++) {
1156 rte_event_port_unlink(eventdev_id, j,
1157 &(adapter->tx_ev_queue), 1);
1160 /* Link Tx event queue to Tx port */
1161 ret = rte_event_port_link(eventdev_id, tx_port_id,
1162 &(adapter->tx_ev_queue), NULL, 1);
1164 EH_LOG_ERR("Failed to link event queue to port");
1168 /* Get the service ID used by Tx adapter */
1169 ret = rte_event_eth_tx_adapter_service_id_get(adapter->adapter_id,
1171 if (ret != -ESRCH && ret < 0) {
1172 EH_LOG_ERR("Failed to get service id used by tx adapter %d",
1177 rte_service_set_runstate_mapped_check(service_id, 0);
1179 skip_tx_queue_port_setup:
1181 ret = rte_event_eth_tx_adapter_start(adapter->adapter_id);
1183 EH_LOG_ERR("Failed to start tx adapter %d", ret);
1191 eh_initialize_tx_adapter(struct eventmode_conf *em_conf)
1193 struct tx_adapter_conf *adapter;
1196 /* Configure Tx adapters */
1197 for (i = 0; i < em_conf->nb_tx_adapter; i++) {
1198 adapter = &(em_conf->tx_adapter[i]);
1199 ret = eh_tx_adapter_configure(em_conf, adapter);
1201 EH_LOG_ERR("Failed to configure tx adapter %d", ret);
1209 eh_display_operating_mode(struct eventmode_conf *em_conf)
1211 char sched_types[][32] = {
1212 "RTE_SCHED_TYPE_ORDERED",
1213 "RTE_SCHED_TYPE_ATOMIC",
1214 "RTE_SCHED_TYPE_PARALLEL",
1216 EH_LOG_INFO("Operating mode:");
1218 EH_LOG_INFO("\tScheduling type: \t%s",
1219 sched_types[em_conf->ext_params.sched_type]);
1225 eh_display_event_dev_conf(struct eventmode_conf *em_conf)
1227 char queue_mode[][32] = {
1229 "ATQ (ALL TYPE QUEUE)",
1232 char print_buf[256] = { 0 };
1235 EH_LOG_INFO("Event Device Configuration:");
1237 for (i = 0; i < em_conf->nb_eventdev; i++) {
1239 "\tDev ID: %-2d \tQueues: %-2d \tPorts: %-2d",
1240 em_conf->eventdev_config[i].eventdev_id,
1241 em_conf->eventdev_config[i].nb_eventqueue,
1242 em_conf->eventdev_config[i].nb_eventport);
1243 sprintf(print_buf + strlen(print_buf),
1245 queue_mode[em_conf->eventdev_config[i].ev_queue_mode]);
1246 EH_LOG_INFO("%s", print_buf);
1252 eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)
1254 int nb_rx_adapter = em_conf->nb_rx_adapter;
1255 struct rx_adapter_connection_info *conn;
1256 struct rx_adapter_conf *adapter;
1257 char print_buf[256] = { 0 };
1260 EH_LOG_INFO("Rx adapters configured: %d", nb_rx_adapter);
1262 for (i = 0; i < nb_rx_adapter; i++) {
1263 adapter = &(em_conf->rx_adapter[i]);
1265 "\tRx adaper ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
1266 adapter->adapter_id,
1267 adapter->nb_connections,
1268 adapter->eventdev_id);
1269 if (adapter->rx_core_id == (uint32_t)-1)
1270 sprintf(print_buf + strlen(print_buf),
1271 "\tRx core: %-2s", "[INTERNAL PORT]");
1272 else if (adapter->rx_core_id == RTE_MAX_LCORE)
1273 sprintf(print_buf + strlen(print_buf),
1274 "\tRx core: %-2s", "[NONE]");
1276 sprintf(print_buf + strlen(print_buf),
1277 "\tRx core: %-2d", adapter->rx_core_id);
1279 EH_LOG_INFO("%s", print_buf);
1281 for (j = 0; j < adapter->nb_connections; j++) {
1282 conn = &(adapter->conn[j]);
1285 "\t\tEthdev ID: %-2d", conn->ethdev_id);
1287 if (conn->ethdev_rx_qid == -1)
1288 sprintf(print_buf + strlen(print_buf),
1289 "\tEth rx queue: %-2s", "ALL");
1291 sprintf(print_buf + strlen(print_buf),
1292 "\tEth rx queue: %-2d",
1293 conn->ethdev_rx_qid);
1295 sprintf(print_buf + strlen(print_buf),
1296 "\tEvent queue: %-2d", conn->eventq_id);
1297 EH_LOG_INFO("%s", print_buf);
1304 eh_display_tx_adapter_conf(struct eventmode_conf *em_conf)
1306 int nb_tx_adapter = em_conf->nb_tx_adapter;
1307 struct tx_adapter_connection_info *conn;
1308 struct tx_adapter_conf *adapter;
1309 char print_buf[256] = { 0 };
1312 EH_LOG_INFO("Tx adapters configured: %d", nb_tx_adapter);
1314 for (i = 0; i < nb_tx_adapter; i++) {
1315 adapter = &(em_conf->tx_adapter[i]);
1317 "\tTx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
1318 adapter->adapter_id,
1319 adapter->nb_connections,
1320 adapter->eventdev_id);
1321 if (adapter->tx_core_id == (uint32_t)-1)
1322 sprintf(print_buf + strlen(print_buf),
1323 "\tTx core: %-2s", "[INTERNAL PORT]");
1324 else if (adapter->tx_core_id == RTE_MAX_LCORE)
1325 sprintf(print_buf + strlen(print_buf),
1326 "\tTx core: %-2s", "[NONE]");
1328 sprintf(print_buf + strlen(print_buf),
1329 "\tTx core: %-2d,\tInput event queue: %-2d",
1330 adapter->tx_core_id, adapter->tx_ev_queue);
1332 EH_LOG_INFO("%s", print_buf);
1334 for (j = 0; j < adapter->nb_connections; j++) {
1335 conn = &(adapter->conn[j]);
1338 "\t\tEthdev ID: %-2d", conn->ethdev_id);
1340 if (conn->ethdev_tx_qid == -1)
1341 sprintf(print_buf + strlen(print_buf),
1342 "\tEth tx queue: %-2s", "ALL");
1344 sprintf(print_buf + strlen(print_buf),
1345 "\tEth tx queue: %-2d",
1346 conn->ethdev_tx_qid);
1347 EH_LOG_INFO("%s", print_buf);
1354 eh_display_link_conf(struct eventmode_conf *em_conf)
1356 struct eh_event_link_info *link;
1357 char print_buf[256] = { 0 };
1360 EH_LOG_INFO("Links configured: %d", em_conf->nb_link);
1362 for (i = 0; i < em_conf->nb_link; i++) {
1363 link = &(em_conf->link[i]);
1366 "\tEvent dev ID: %-2d\tEvent port: %-2d",
1368 link->event_port_id);
1370 if (em_conf->ext_params.all_ev_queue_to_ev_port)
1371 sprintf(print_buf + strlen(print_buf),
1372 "Event queue: %-2s\t", "ALL");
1374 sprintf(print_buf + strlen(print_buf),
1375 "Event queue: %-2d\t", link->eventq_id);
1377 sprintf(print_buf + strlen(print_buf),
1378 "Lcore: %-2d", link->lcore_id);
1379 EH_LOG_INFO("%s", print_buf);
/*
 * NOTE(review): the enclosing function's signature line is elided from
 * this listing; from the body this is presumably the event-helper conf
 * allocator (eh_conf_init) — confirm against the full file.
 *
 * Allocates an eh_conf plus its eventmode_conf, fills in defaults
 * (poll-mode packet transfer, app-managed IPsec, all eth ports enabled),
 * builds the eth-core bitmap and reserves the first two non-master
 * lcores as Rx and Tx eth cores.
 */
1387 struct eventmode_conf *em_conf = NULL;
1388 struct eh_conf *conf = NULL;
1389 unsigned int eth_core_id;
1390 void *bitmap = NULL;
1393 /* Allocate memory for config */
1394 conf = calloc(1, sizeof(struct eh_conf));
1396 EH_LOG_ERR("Failed to allocate memory for eventmode helper "
1401 /* Set default conf */
1403 /* Packet transfer mode: poll */
1404 conf->mode = EH_PKT_TRANSFER_MODE_POLL;
1405 conf->ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
1407 /* Keep all ethernet ports enabled by default */
1408 conf->eth_portmask = -1;
1410 /* Allocate memory for event mode params */
1411 conf->mode_params = calloc(1, sizeof(struct eventmode_conf));
1412 if (conf->mode_params == NULL) {
1413 EH_LOG_ERR("Failed to allocate memory for event mode params");
1417 /* Get eventmode conf */
1418 em_conf = conf->mode_params;
1420 /* Allocate and initialize bitmap for eth cores */
1421 nb_bytes = rte_bitmap_get_memory_footprint(RTE_MAX_LCORE);
1423 EH_LOG_ERR("Failed to get bitmap footprint");
/* Bitmap backing store must come from rte_zmalloc so it can be
 * released later with rte_free() (see eh_conf_uninit) */
1427 bitmap = rte_zmalloc("event-helper-ethcore-bitmap", nb_bytes,
1428 RTE_CACHE_LINE_SIZE);
1430 EH_LOG_ERR("Failed to allocate memory for eth cores bitmap\n");
1434 em_conf->eth_core_mask = rte_bitmap_init(RTE_MAX_LCORE, bitmap,
1436 if (!em_conf->eth_core_mask) {
1437 EH_LOG_ERR("Failed to initialize bitmap");
1441 /* Set schedule type as not set */
1442 em_conf->ext_params.sched_type = SCHED_TYPE_NOT_SET;
1444 /* Set two cores as eth cores for Rx & Tx */
1446 /* Use first core other than master core as Rx core */
1447 eth_core_id = rte_get_next_lcore(0, /* curr core */
1448 1, /* skip master core */
1451 rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);
1453 /* Use next core as Tx core */
1454 eth_core_id = rte_get_next_lcore(eth_core_id, /* curr core */
1455 1, /* skip master core */
1458 rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);
/*
 * Release memory owned by an eh_conf created by the helper.
 *
 * Safe to call with NULL conf or NULL mode_params (returns early).
 * Frees the eth-core bitmap via rte_free(), matching the rte_zmalloc()
 * allocation made at init time.
 * NOTE(review): interior lines are elided in this listing; the elided
 * tail presumably frees conf->mode_params and conf as well — confirm.
 */
1472 eh_conf_uninit(struct eh_conf *conf)
1474 struct eventmode_conf *em_conf = NULL;
1476 if (!conf || !conf->mode_params)
1479 /* Get eventmode conf */
1480 em_conf = conf->mode_params;
1482 /* Free eventmode configuration memory */
1483 rte_free(em_conf->eth_core_mask);
/*
 * Display the full event-mode configuration.
 *
 * Validates the conf pointer and its mode_params, no-ops for non-event
 * packet transfer modes, then dispatches to the per-section display
 * helpers: operating mode, event device, Rx adapter, Tx adapter and
 * event-lcore links.
 */
1489 eh_display_conf(struct eh_conf *conf)
1491 struct eventmode_conf *em_conf;
1494 EH_LOG_ERR("Invalid event helper configuration");
/* Nothing to display unless running in event mode */
1498 if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
1501 if (conf->mode_params == NULL) {
1502 EH_LOG_ERR("Invalid event mode parameters");
1506 /* Get eventmode conf */
1507 em_conf = (struct eventmode_conf *)(conf->mode_params);
1509 /* Display user exposed operating modes */
1510 eh_display_operating_mode(em_conf);
1512 /* Display event device conf */
1513 eh_display_event_dev_conf(em_conf);
1515 /* Display Rx adapter conf */
1516 eh_display_rx_adapter_conf(em_conf);
1518 /* Display Tx adapter conf */
1519 eh_display_tx_adapter_conf(em_conf);
1521 /* Display event-lcore link */
1522 eh_display_link_conf(em_conf);
/*
 * Initialize all devices needed for event mode.
 *
 * Sequence: validate conf -> copy the app's eth portmask into the
 * eventmode conf -> validate the requested config -> display it ->
 * stop the enabled eth devices -> set up eventdev, Rx adapter and
 * Tx adapter -> restart the enabled eth devices.
 * Eth devices must be stopped while adapters are being attached;
 * they are restarted only after all adapters are configured.
 */
1526 eh_devs_init(struct eh_conf *conf)
1528 struct eventmode_conf *em_conf;
1533 EH_LOG_ERR("Invalid event helper configuration");
/* Nothing to do unless running in event mode */
1537 if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
1540 if (conf->mode_params == NULL) {
1541 EH_LOG_ERR("Invalid event mode parameters");
1545 /* Get eventmode conf */
1546 em_conf = conf->mode_params;
1548 /* Eventmode conf would need eth portmask */
1549 em_conf->eth_portmask = conf->eth_portmask;
1551 /* Validate the requested config */
1552 ret = eh_validate_conf(em_conf);
1554 EH_LOG_ERR("Failed to validate the requested config %d", ret);
1558 /* Display the current configuration */
1559 eh_display_conf(conf);
1561 /* Stop eth devices before setting up adapter */
1562 RTE_ETH_FOREACH_DEV(port_id) {
1564 /* Use only the ports enabled */
1565 if ((conf->eth_portmask & (1 << port_id)) == 0)
1568 rte_eth_dev_stop(port_id);
1571 /* Setup eventdev */
1572 ret = eh_initialize_eventdev(em_conf);
1574 EH_LOG_ERR("Failed to initialize event dev %d", ret);
1578 /* Setup Rx adapter */
1579 ret = eh_initialize_rx_adapter(em_conf);
1581 EH_LOG_ERR("Failed to initialize rx adapter %d", ret);
1585 /* Setup Tx adapter */
1586 ret = eh_initialize_tx_adapter(em_conf);
1588 EH_LOG_ERR("Failed to initialize tx adapter %d", ret);
1592 /* Start eth devices after setting up adapter */
1593 RTE_ETH_FOREACH_DEV(port_id) {
1595 /* Use only the ports enabled */
1596 if ((conf->eth_portmask & (1 << port_id)) == 0)
1599 ret = rte_eth_dev_start(port_id);
1601 EH_LOG_ERR("Failed to start eth dev %d, %d",
/*
 * Tear down everything set up by eh_devs_init, in reverse dependency
 * order: Rx adapters first (stop, delete all queues with qid -1, free),
 * then event devices (stop, close), then Tx adapters (stop, delete all
 * queues with qid -1, free). Errors are logged but teardown continues
 * where the visible code shows no early exit.
 */
1611 eh_devs_uninit(struct eh_conf *conf)
1613 struct eventmode_conf *em_conf;
1618 EH_LOG_ERR("Invalid event helper configuration");
/* Nothing to undo unless running in event mode */
1622 if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
1625 if (conf->mode_params == NULL) {
1626 EH_LOG_ERR("Invalid event mode parameters");
1630 /* Get eventmode conf */
1631 em_conf = conf->mode_params;
1633 /* Stop and release rx adapters */
1634 for (i = 0; i < em_conf->nb_rx_adapter; i++) {
1636 id = em_conf->rx_adapter[i].adapter_id;
1637 ret = rte_event_eth_rx_adapter_stop(id);
1639 EH_LOG_ERR("Failed to stop rx adapter %d", ret);
1643 for (j = 0; j < em_conf->rx_adapter[i].nb_connections; j++) {
/* rx_queue_id of -1 deletes every queue of this ethdev */
1645 ret = rte_event_eth_rx_adapter_queue_del(id,
1646 em_conf->rx_adapter[i].conn[j].ethdev_id, -1);
1649 "Failed to remove rx adapter queues %d",
1655 ret = rte_event_eth_rx_adapter_free(id);
1657 EH_LOG_ERR("Failed to free rx adapter %d", ret);
1662 /* Stop and release event devices */
1663 for (i = 0; i < em_conf->nb_eventdev; i++) {
1665 id = em_conf->eventdev_config[i].eventdev_id;
1666 rte_event_dev_stop(id);
1668 ret = rte_event_dev_close(id);
1670 EH_LOG_ERR("Failed to close event dev %d, %d", id, ret);
1675 /* Stop and release tx adapters */
1676 for (i = 0; i < em_conf->nb_tx_adapter; i++) {
1678 id = em_conf->tx_adapter[i].adapter_id;
1679 ret = rte_event_eth_tx_adapter_stop(id);
1681 EH_LOG_ERR("Failed to stop tx adapter %d", ret);
1685 for (j = 0; j < em_conf->tx_adapter[i].nb_connections; j++) {
/* tx_queue_id of -1 deletes every queue of this ethdev */
1687 ret = rte_event_eth_tx_adapter_queue_del(id,
1688 em_conf->tx_adapter[i].conn[j].ethdev_id, -1);
1691 "Failed to remove tx adapter queues %d",
1697 ret = rte_event_eth_tx_adapter_free(id);
1699 EH_LOG_ERR("Failed to free tx adapter %d", ret);
/*
 * Per-lcore entry point that launches the right worker on the calling
 * core.
 *
 * If the calling lcore is marked in eth_core_mask it runs the eth Rx/Tx
 * core loop (eh_start_worker_eth_core) instead of an app worker.
 * Otherwise it matches the lcore against the application-registered
 * worker table (app_wrkr, nb_wrkr_param), verifies the match, fetches
 * this lcore's event links and invokes the matched worker_thread with
 * them. On exit it signals the eth cores to stop.
 */
1708 eh_launch_worker(struct eh_conf *conf, struct eh_app_worker_params *app_wrkr,
1709 uint8_t nb_wrkr_param)
1711 struct eh_app_worker_params *match_wrkr;
1712 struct eh_event_link_info *links = NULL;
1713 struct eventmode_conf *em_conf;
1718 EH_LOG_ERR("Invalid event helper configuration");
1722 if (conf->mode_params == NULL) {
1723 EH_LOG_ERR("Invalid event mode parameters");
1727 /* Get eventmode conf */
1728 em_conf = conf->mode_params;
1731 lcore_id = rte_lcore_id();
1733 /* Check if this is eth core */
1734 if (rte_bitmap_get(em_conf->eth_core_mask, lcore_id)) {
1735 eh_start_worker_eth_core(em_conf, lcore_id);
/* Worker table is only required for non-eth cores */
1739 if (app_wrkr == NULL || nb_wrkr_param == 0) {
1740 EH_LOG_ERR("Invalid args");
1745 * This is a regular worker thread. The application registers
1746 * multiple workers with various capabilities. Run worker
1747 * based on the selected capabilities of the event
1748 * device configured.
1751 /* Get the first matching worker for the event device */
1752 match_wrkr = eh_find_worker(lcore_id, conf, app_wrkr, nb_wrkr_param);
1753 if (match_wrkr == NULL) {
1754 EH_LOG_ERR("Failed to match worker registered for lcore %d",
1756 goto clean_and_exit;
1759 /* Verify sanity of the matched worker */
1760 if (eh_verify_match_worker(match_wrkr) != 1) {
1761 EH_LOG_ERR("Failed to validate the matched worker");
1762 goto clean_and_exit;
1765 /* Get worker links */
1766 nb_links = eh_get_event_lcore_links(lcore_id, conf, &links);
1768 /* Launch the worker thread */
1769 match_wrkr->worker_thread(links, nb_links);
1771 /* Free links info memory */
1776 /* Flag eth_cores to stop, if started */
1777 eh_stop_worker_eth_core();
/*
 * Return the event queue to be used for Tx on the given event device.
 *
 * Looks up the eventdev config by id and returns its last event queue
 * (nb_eventqueue - 1), which is reserved as the atomic queue feeding
 * the final eth-packet-tx stage. Errors (bad conf, unknown eventdev)
 * are logged; the elided error paths presumably return a negative
 * value — confirm against the full file.
 */
1781 eh_get_tx_queue(struct eh_conf *conf, uint8_t eventdev_id)
1783 struct eventdev_params *eventdev_config;
1784 struct eventmode_conf *em_conf;
1787 EH_LOG_ERR("Invalid event helper configuration");
1791 if (conf->mode_params == NULL) {
1792 EH_LOG_ERR("Invalid event mode parameters");
1796 /* Get eventmode conf */
1797 em_conf = conf->mode_params;
1799 /* Get event device conf */
1800 eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);
1802 if (eventdev_config == NULL) {
1803 EH_LOG_ERR("Failed to read eventdev config");
1808 * The last queue is reserved to be used as atomic queue for the
1809 * last stage (eth packet tx stage)
1811 return eventdev_config->nb_eventqueue - 1;