1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2020 Marvell International Ltd.
 */
4 #include <rte_bitmap.h>
5 #include <rte_ethdev.h>
6 #include <rte_eventdev.h>
7 #include <rte_malloc.h>
9 #include "event_helper.h"
/*
 * Return the id of the next active worker lcore after prev_core,
 * skipping any lcore reserved as an eth core (set in
 * em_conf->eth_core_mask). Returns RTE_MAX_LCORE when the lcore list
 * is exhausted.
 *
 * NOTE(review): this chunk elides lines here (the do-loop opener,
 * the body of the RTE_MAX_LCORE branch and the final return) — the
 * comments below annotate only the visible code.
 */
11 static inline unsigned int
12 eh_get_next_active_core(struct eventmode_conf *em_conf, unsigned int prev_core)
14 unsigned int next_core;
16 /* Get next active core skipping cores reserved as eth cores */
18 /* Get the next core */
/* 0,0 => include the main lcore, do not wrap around */
19 next_core = rte_get_next_lcore(prev_core, 0, 0);
21 /* Check if we have reached max lcores */
22 if (next_core == RTE_MAX_LCORE)
/* Advance and retry while the candidate is reserved for eth Rx/Tx */
25 prev_core = next_core;
26 } while (rte_bitmap_get(em_conf->eth_core_mask, next_core));
/*
 * Populate em_conf with a default config for the (single) event device
 * when the user did not specify one: take the device's maximum queue
 * and port counts, then cap queues at nb_eth_dev + 1 (one extra queue
 * for Tx) and ports at the number of lcores (one port per lcore).
 *
 * NOTE(review): the chunk elides lines (return type line, error-path
 * returns and closing braces of the validation blocks); comments below
 * annotate only the visible code.
 */
32 eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
34 int lcore_count, nb_eventdev, nb_eth_dev, ret;
35 struct eventdev_params *eventdev_config;
36 struct rte_event_dev_info dev_info;
38 /* Get the number of event devices */
39 nb_eventdev = rte_event_dev_count();
40 if (nb_eventdev == 0) {
41 EH_LOG_ERR("No event devices detected");
/* This helper only supports a single event device */
45 if (nb_eventdev != 1) {
46 EH_LOG_ERR("Event mode does not support multiple event devices. "
47 "Please provide only one event device.");
51 /* Get the number of eth devs */
52 nb_eth_dev = rte_eth_dev_count_avail();
53 if (nb_eth_dev == 0) {
54 EH_LOG_ERR("No eth devices detected");
58 /* Get the number of lcores */
59 lcore_count = rte_lcore_count();
61 /* Read event device info */
/* Device id 0 is queried directly: the single-device case was
 * enforced above.
 */
62 ret = rte_event_dev_info_get(0, &dev_info);
64 EH_LOG_ERR("Failed to read event device info %d", ret);
68 /* Check if enough ports are available */
/* NOTE(review): presumably at least two ports are needed (worker +
 * adapter/Tx stage) — confirm against the adapter setup code.
 */
69 if (dev_info.max_event_ports < 2) {
70 EH_LOG_ERR("Not enough event ports available");
74 /* Get the first event dev conf */
75 eventdev_config = &(em_conf->eventdev_config[0]);
77 /* Save number of queues & ports available */
78 eventdev_config->eventdev_id = 0;
79 eventdev_config->nb_eventqueue = dev_info.max_event_queues;
80 eventdev_config->nb_eventport = dev_info.max_event_ports;
/* ALL_TYPES: queues accept any schedule type per event */
81 eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
83 /* Check if there are more queues than required */
84 if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
85 /* One queue is reserved for Tx */
86 eventdev_config->nb_eventqueue = nb_eth_dev + 1;
89 /* Check if there are more ports than required */
90 if (eventdev_config->nb_eventport > lcore_count) {
91 /* One port per lcore is enough */
92 eventdev_config->nb_eventport = lcore_count;
95 /* Update the number of event devices */
96 em_conf->nb_eventdev++;
/*
 * Generate the default event port <-> lcore link table when the user
 * supplied none: one link per event port, each bound to the next
 * active (non-eth) worker core, with "all_ev_queue_to_ev_port" set so
 * every queue maps to every port.
 *
 * NOTE(review): this chunk elides lines (return type, loop-variable
 * declarations, error-path breaks and closing braces); comments below
 * annotate only the visible code.
 */
102 eh_set_default_conf_link(struct eventmode_conf *em_conf)
104 struct eventdev_params *eventdev_config;
105 struct eh_event_link_info *link;
/* -1 wraps to UINT_MAX so the first lookup starts from lcore 0 */
106 unsigned int lcore_id = -1;
/*
110 * Create a 1:1 mapping from event ports to cores. If the number
111 * of event ports is lesser than the cores, some cores won't
112 * execute worker. If there are more event ports, then some ports
 * won't be used (fragment truncated in this view).
 */
/*
118 * The event queue-port mapping is done according to the link. Since
119 * we are falling back to the default link config, enabling
120 * "all_ev_queue_to_ev_port" mode flag. This will map all queues
 * to the port (fragment truncated in this view).
 */
123 em_conf->ext_params.all_ev_queue_to_ev_port = 1;
125 /* Get first event dev conf */
126 eventdev_config = &(em_conf->eventdev_config[0]);
128 /* Loop through the ports */
129 for (i = 0; i < eventdev_config->nb_eventport; i++) {
131 /* Get next active core id */
132 lcore_id = eh_get_next_active_core(em_conf,
135 if (lcore_id == RTE_MAX_LCORE) {
136 /* Reached max cores */
140 /* Save the current combination as one link */
143 link_index = em_conf->nb_link;
145 /* Get the corresponding link */
146 link = &(em_conf->link[link_index]);
/* Bind this event port to the chosen worker lcore */
149 link->eventdev_id = eventdev_config->eventdev_id;
150 link->event_port_id = i;
151 link->lcore_id = lcore_id;
/*
154 * Don't set eventq_id as by default all queues
155 * need to be mapped to the port, which is controlled
156 * by the operating mode.
 */
159 /* Update number of links */
/*
 * Validate the user-supplied event mode config, filling in defaults
 * for any section left unspecified: the eventdev config when
 * nb_eventdev == 0 and the port/core link table when nb_link == 0.
 *
 * NOTE(review): error-check lines after each helper call are elided
 * in this view.
 */
167 eh_validate_conf(struct eventmode_conf *em_conf)
/*
172 * Check if event devs are specified. Else probe the event devices
173 * and initialize the config with all ports & queues available
 */
175 if (em_conf->nb_eventdev == 0) {
176 ret = eh_set_default_conf_eventdev(em_conf);
/*
182 * Check if links are specified. Else generate a default config for
183 * the event ports used.
 */
185 if (em_conf->nb_link == 0) {
186 ret = eh_set_default_conf_link(em_conf);
/*
 * Configure, link and start every event device described in em_conf:
 *   1. rte_event_dev_configure() each device using its defaults from
 *      rte_event_dev_info_get() for limits/depths;
 *   2. set up each event queue (last queue is the atomic Tx stage,
 *      the rest use the configured sched_type);
 *   3. set up each event port with default config (NULL);
 *   4. link queues to ports per em_conf->link[] (all queues to each
 *      port when all_ev_queue_to_ev_port is set);
 *   5. start all devices.
 *
 * NOTE(review): this chunk elides lines (loop variable declarations,
 * error-path returns, else/closing braces, some call arguments);
 * comments below annotate only the visible code.
 */
195 eh_initialize_eventdev(struct eventmode_conf *em_conf)
197 struct rte_event_queue_conf eventq_conf = {0};
198 struct rte_event_dev_info evdev_default_conf;
199 struct rte_event_dev_config eventdev_conf;
200 struct eventdev_params *eventdev_config;
201 int nb_eventdev = em_conf->nb_eventdev;
202 struct eh_event_link_info *link;
203 uint8_t *queue = NULL;
/* Pass 1: configure devices and their queues/ports */
209 for (i = 0; i < nb_eventdev; i++) {
211 /* Get eventdev config */
212 eventdev_config = &(em_conf->eventdev_config[i]);
214 /* Get event dev ID */
215 eventdev_id = eventdev_config->eventdev_id;
217 /* Get the number of queues */
218 nb_eventqueue = eventdev_config->nb_eventqueue;
220 /* Reset the default conf */
221 memset(&evdev_default_conf, 0,
222 sizeof(struct rte_event_dev_info));
224 /* Get default conf of eventdev */
225 ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
228 "Error in getting event device info[devID:%d]",
/* Build the device config from the device's own maxima */
233 memset(&eventdev_conf, 0, sizeof(struct rte_event_dev_config));
234 eventdev_conf.nb_events_limit =
235 evdev_default_conf.max_num_events;
236 eventdev_conf.nb_event_queues = nb_eventqueue;
237 eventdev_conf.nb_event_ports =
238 eventdev_config->nb_eventport;
239 eventdev_conf.nb_event_queue_flows =
240 evdev_default_conf.max_event_queue_flows;
241 eventdev_conf.nb_event_port_dequeue_depth =
242 evdev_default_conf.max_event_port_dequeue_depth;
243 eventdev_conf.nb_event_port_enqueue_depth =
244 evdev_default_conf.max_event_port_enqueue_depth;
246 /* Configure event device */
247 ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
249 EH_LOG_ERR("Error in configuring event device");
253 /* Configure event queues */
254 for (j = 0; j < nb_eventqueue; j++) {
256 memset(&eventq_conf, 0,
257 sizeof(struct rte_event_queue_conf));
259 /* Per event dev queues can be ATQ or SINGLE LINK */
260 eventq_conf.event_queue_cfg =
261 eventdev_config->ev_queue_mode;
/*
263 * All queues need to be set with sched_type as
264 * schedule type for the application stage. One queue
265 * would be reserved for the final eth tx stage. This
266 * will be an atomic queue.
 */
268 if (j == nb_eventqueue-1) {
269 eventq_conf.schedule_type =
270 RTE_SCHED_TYPE_ATOMIC;
/* (else branch — keyword elided in this view) */
272 eventq_conf.schedule_type =
273 em_conf->ext_params.sched_type;
276 /* Set max atomic flows to 1024 */
277 eventq_conf.nb_atomic_flows = 1024;
278 eventq_conf.nb_atomic_order_sequences = 1024;
280 /* Setup the queue */
281 ret = rte_event_queue_setup(eventdev_id, j,
284 EH_LOG_ERR("Failed to setup event queue %d",
290 /* Configure event ports */
/* NULL => use the port defaults reported by the device */
291 for (j = 0; j < eventdev_config->nb_eventport; j++) {
292 ret = rte_event_port_setup(eventdev_id, j, NULL);
294 EH_LOG_ERR("Failed to setup event port %d",
301 /* Make event queue - event port link */
302 for (j = 0; j < em_conf->nb_link; j++) {
305 link = &(em_conf->link[j]);
307 /* Get event dev ID */
308 eventdev_id = link->eventdev_id;
/*
311 * If "all_ev_queue_to_ev_port" params flag is selected, all
312 * queues need to be mapped to the port.
 */
314 if (em_conf->ext_params.all_ev_queue_to_ev_port)
/* Otherwise link only the single queue stored in the link entry */
317 queue = &(link->eventq_id);
319 /* Link queue to port */
320 ret = rte_event_port_link(eventdev_id, link->event_port_id,
323 EH_LOG_ERR("Failed to link event port %d", ret);
328 /* Start event devices */
329 for (i = 0; i < nb_eventdev; i++) {
331 /* Get eventdev config */
332 eventdev_config = &(em_conf->eventdev_config[i]);
334 ret = rte_event_dev_start(eventdev_config->eventdev_id);
336 EH_LOG_ERR("Failed to start event device %d, %d",
/*
 * Public entry point: validate the event helper config and bring up
 * event mode. Eth devices selected by conf->eth_portmask are stopped
 * before the eventdev (and, presumably, adapters) are set up, then
 * restarted afterwards. A no-op when conf->mode is not
 * EH_PKT_TRANSFER_MODE_EVENT.
 *
 * NOTE(review): this chunk elides lines (local declarations, NULL
 * check on conf, returns, `continue` statements inside the port
 * loops); comments below annotate only the visible code.
 */
345 eh_devs_init(struct eh_conf *conf)
347 struct eventmode_conf *em_conf;
352 EH_LOG_ERR("Invalid event helper configuration");
/* Nothing to do unless event transfer mode was requested */
356 if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
359 if (conf->mode_params == NULL) {
360 EH_LOG_ERR("Invalid event mode parameters");
364 /* Get eventmode conf */
365 em_conf = conf->mode_params;
367 /* Validate the requested config */
368 ret = eh_validate_conf(em_conf);
370 EH_LOG_ERR("Failed to validate the requested config %d", ret);
374 /* Stop eth devices before setting up adapter */
375 RTE_ETH_FOREACH_DEV(port_id) {
377 /* Use only the ports enabled */
/* NOTE(review): `1 << port_id` is a signed shift — UB if
 * port_id >= 31; consider `1UL << port_id` upstream.
 */
378 if ((conf->eth_portmask & (1 << port_id)) == 0)
381 rte_eth_dev_stop(port_id);
/* Configure, link and start the event device(s) */
385 ret = eh_initialize_eventdev(em_conf);
387 EH_LOG_ERR("Failed to initialize event dev %d", ret);
391 /* Start eth devices after setting up adapter */
392 RTE_ETH_FOREACH_DEV(port_id) {
394 /* Use only the ports enabled */
395 if ((conf->eth_portmask & (1 << port_id)) == 0)
398 ret = rte_eth_dev_start(port_id);
400 EH_LOG_ERR("Failed to start eth dev %d, %d",
410 eh_devs_uninit(struct eh_conf *conf)
412 struct eventmode_conf *em_conf;
417 EH_LOG_ERR("Invalid event helper configuration");
421 if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
424 if (conf->mode_params == NULL) {
425 EH_LOG_ERR("Invalid event mode parameters");
429 /* Get eventmode conf */
430 em_conf = conf->mode_params;
432 /* Stop and release event devices */
433 for (i = 0; i < em_conf->nb_eventdev; i++) {
435 id = em_conf->eventdev_config[i].eventdev_id;
436 rte_event_dev_stop(id);
438 ret = rte_event_dev_close(id);
440 EH_LOG_ERR("Failed to close event dev %d, %d", id, ret);