examples/ipsec-secgw: add event mode
[dpdk.git] / examples / ipsec-secgw / event_helper.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (C) 2020 Marvell International Ltd.
3  */
4 #include <rte_bitmap.h>
5 #include <rte_ethdev.h>
6 #include <rte_eventdev.h>
7 #include <rte_event_eth_rx_adapter.h>
8 #include <rte_event_eth_tx_adapter.h>
9 #include <rte_malloc.h>
10 #include <stdbool.h>
11
12 #include "event_helper.h"
13
14 static volatile bool eth_core_running;
15
16 static int
17 eh_get_enabled_cores(struct rte_bitmap *eth_core_mask)
18 {
19         int i, count = 0;
20
21         RTE_LCORE_FOREACH(i) {
22                 /* Check if this core is enabled in core mask*/
23                 if (rte_bitmap_get(eth_core_mask, i)) {
24                         /* Found enabled core */
25                         count++;
26                 }
27         }
28         return count;
29 }
30
/*
 * Return the next lcore marked as an eth core, round-robin across calls,
 * or RTE_MAX_LCORE if no eth core is enabled.
 *
 * NOTE(review): prev_core is a function-local static, so this helper is
 * neither reentrant nor thread-safe; presumably it is only invoked from
 * the single configuration thread -- confirm with callers.
 */
static inline unsigned int
eh_get_next_eth_core(struct eventmode_conf *em_conf)
{
	/* Wraps to (unsigned)-1 so the first lookup starts from lcore 0 */
	static unsigned int prev_core = -1;
	unsigned int next_core;

	/*
	 * Make sure we have at least one eth core running, else the following
	 * logic would lead to an infinite loop.
	 */
	if (eh_get_enabled_cores(em_conf->eth_core_mask) == 0) {
		EH_LOG_ERR("No enabled eth core found");
		return RTE_MAX_LCORE;
	}

	/* Only some cores are marked as eth cores, skip others */
	do {
		/* Get the next core (wrap enabled, so iteration restarts) */
		next_core = rte_get_next_lcore(prev_core, 0, 1);

		/* Check if we have reached max lcores */
		if (next_core == RTE_MAX_LCORE)
			return next_core;

		/* Update prev_core */
		prev_core = next_core;
	} while (!(rte_bitmap_get(em_conf->eth_core_mask, next_core)));

	return next_core;
}
61
62 static inline unsigned int
63 eh_get_next_active_core(struct eventmode_conf *em_conf, unsigned int prev_core)
64 {
65         unsigned int next_core;
66
67         /* Get next active core skipping cores reserved as eth cores */
68         do {
69                 /* Get the next core */
70                 next_core = rte_get_next_lcore(prev_core, 0, 0);
71
72                 /* Check if we have reached max lcores */
73                 if (next_core == RTE_MAX_LCORE)
74                         return next_core;
75
76                 prev_core = next_core;
77         } while (rte_bitmap_get(em_conf->eth_core_mask, next_core));
78
79         return next_core;
80 }
81
82 static struct eventdev_params *
83 eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id)
84 {
85         int i;
86
87         for (i = 0; i < em_conf->nb_eventdev; i++) {
88                 if (em_conf->eventdev_config[i].eventdev_id == eventdev_id)
89                         break;
90         }
91
92         /* No match */
93         if (i == em_conf->nb_eventdev)
94                 return NULL;
95
96         return &(em_conf->eventdev_config[i]);
97 }
98
99 static inline bool
100 eh_dev_has_rx_internal_port(uint8_t eventdev_id)
101 {
102         bool flag = true;
103         int j;
104
105         RTE_ETH_FOREACH_DEV(j) {
106                 uint32_t caps = 0;
107
108                 rte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps);
109                 if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
110                         flag = false;
111         }
112         return flag;
113 }
114
115 static inline bool
116 eh_dev_has_tx_internal_port(uint8_t eventdev_id)
117 {
118         bool flag = true;
119         int j;
120
121         RTE_ETH_FOREACH_DEV(j) {
122                 uint32_t caps = 0;
123
124                 rte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps);
125                 if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
126                         flag = false;
127         }
128         return flag;
129 }
130
131 static inline bool
132 eh_dev_has_burst_mode(uint8_t dev_id)
133 {
134         struct rte_event_dev_info dev_info;
135
136         rte_event_dev_info_get(dev_id, &dev_info);
137         return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
138                         true : false;
139 }
140
/*
 * Build a default event device configuration when the user supplied none.
 * Exactly one event device (dev 0) is supported; its queue count is capped
 * at nb_eth_dev + 1 (one event queue per eth port plus one reserved for
 * Tx) and its port count at one per lcore.
 *
 * Returns 0 on success, a negative errno-style value otherwise.
 */
static int
eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
{
	int lcore_count, nb_eventdev, nb_eth_dev, ret;
	struct eventdev_params *eventdev_config;
	struct rte_event_dev_info dev_info;

	/* Get the number of event devices */
	nb_eventdev = rte_event_dev_count();
	if (nb_eventdev == 0) {
		EH_LOG_ERR("No event devices detected");
		return -EINVAL;
	}

	if (nb_eventdev != 1) {
		EH_LOG_ERR("Event mode does not support multiple event devices. "
			   "Please provide only one event device.");
		return -EINVAL;
	}

	/* Get the number of eth devs */
	nb_eth_dev = rte_eth_dev_count_avail();
	if (nb_eth_dev == 0) {
		EH_LOG_ERR("No eth devices detected");
		return -EINVAL;
	}

	/* Get the number of lcores */
	lcore_count = rte_lcore_count();

	/* Read event device info (device 0: the single supported eventdev) */
	ret = rte_event_dev_info_get(0, &dev_info);
	if (ret < 0) {
		EH_LOG_ERR("Failed to read event device info %d", ret);
		return ret;
	}

	/* Check if enough ports are available (at least Rx + Tx side) */
	if (dev_info.max_event_ports < 2) {
		EH_LOG_ERR("Not enough event ports available");
		return -EINVAL;
	}

	/* Get the first event dev conf */
	eventdev_config = &(em_conf->eventdev_config[0]);

	/* Save number of queues & ports available */
	eventdev_config->eventdev_id = 0;
	eventdev_config->nb_eventqueue = dev_info.max_event_queues;
	eventdev_config->nb_eventport = dev_info.max_event_ports;
	eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES;

	/* Check if there are more queues than required */
	if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
		/* One queue is reserved for Tx */
		eventdev_config->nb_eventqueue = nb_eth_dev + 1;
	}

	/* Check if there are more ports than required */
	if (eventdev_config->nb_eventport > lcore_count) {
		/* One port per lcore is enough */
		eventdev_config->nb_eventport = lcore_count;
	}

	/* Update the number of event devices */
	em_conf->nb_eventdev++;

	return 0;
}
210
211 static void
212 eh_do_capability_check(struct eventmode_conf *em_conf)
213 {
214         struct eventdev_params *eventdev_config;
215         int all_internal_ports = 1;
216         uint32_t eventdev_id;
217         int i;
218
219         for (i = 0; i < em_conf->nb_eventdev; i++) {
220
221                 /* Get the event dev conf */
222                 eventdev_config = &(em_conf->eventdev_config[i]);
223                 eventdev_id = eventdev_config->eventdev_id;
224
225                 /* Check if event device has internal port for Rx & Tx */
226                 if (eh_dev_has_rx_internal_port(eventdev_id) &&
227                     eh_dev_has_tx_internal_port(eventdev_id)) {
228                         eventdev_config->all_internal_ports = 1;
229                 } else {
230                         all_internal_ports = 0;
231                 }
232         }
233
234         /*
235          * If Rx & Tx internal ports are supported by all event devices then
236          * eth cores won't be required. Override the eth core mask requested
237          * and decrement number of event queues by one as it won't be needed
238          * for Tx.
239          */
240         if (all_internal_ports) {
241                 rte_bitmap_reset(em_conf->eth_core_mask);
242                 for (i = 0; i < em_conf->nb_eventdev; i++)
243                         em_conf->eventdev_config[i].nb_eventqueue--;
244         }
245 }
246
/*
 * Build default event port <-> lcore links when the user supplied none:
 * one link per event port, assigned round-robin to the active (non-eth)
 * lcores. Also enables "all_ev_queue_to_ev_port" so every event queue is
 * later linked to each port.
 *
 * Always returns 0 (stops early when lcores run out).
 */
static int
eh_set_default_conf_link(struct eventmode_conf *em_conf)
{
	struct eventdev_params *eventdev_config;
	struct eh_event_link_info *link;
	/* -1 wraps to the "before first lcore" sentinel for the core walk */
	unsigned int lcore_id = -1;
	int i, link_index;

	/*
	 * Create a 1:1 mapping from event ports to cores. If the number
	 * of event ports is lesser than the cores, some cores won't
	 * execute worker. If there are more event ports, then some ports
	 * won't be used.
	 */

	/*
	 * The event queue-port mapping is done according to the link. Since
	 * we are falling back to the default link config, enabling
	 * "all_ev_queue_to_ev_port" mode flag. This will map all queues
	 * to the port.
	 */
	em_conf->ext_params.all_ev_queue_to_ev_port = 1;

	/* Get first event dev conf */
	eventdev_config = &(em_conf->eventdev_config[0]);

	/* Loop through the ports */
	for (i = 0; i < eventdev_config->nb_eventport; i++) {

		/* Get next active core id */
		lcore_id = eh_get_next_active_core(em_conf,
				lcore_id);

		if (lcore_id == RTE_MAX_LCORE) {
			/* Reached max cores */
			return 0;
		}

		/* Save the current combination as one link */

		/* Get the index */
		link_index = em_conf->nb_link;

		/* Get the corresponding link */
		link = &(em_conf->link[link_index]);

		/* Save link */
		link->eventdev_id = eventdev_config->eventdev_id;
		link->event_port_id = i;
		link->lcore_id = lcore_id;

		/*
		 * Don't set eventq_id as by default all queues
		 * need to be mapped to the port, which is controlled
		 * by the operating mode.
		 */

		/* Update number of links */
		em_conf->nb_link++;
	}

	return 0;
}
311
/*
 * Build a default Rx adapter configuration when the user supplied none:
 * one adapter on the first event device, with every eth port enabled in
 * eth_portmask connected to an event queue (1:1 when enough event queues
 * exist, otherwise all ports funnel into event queue 0). An Rx service
 * core is reserved only if some eth port lacks INTERNAL_PORT capability.
 *
 * Returns 0 on success, a negative errno-style value otherwise.
 */
static int
eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
{
	struct rx_adapter_connection_info *conn;
	struct eventdev_params *eventdev_config;
	struct rx_adapter_conf *adapter;
	bool rx_internal_port = true;
	bool single_ev_queue = false;
	int nb_eventqueue;
	uint32_t caps = 0;
	int eventdev_id;
	int nb_eth_dev;
	int adapter_id;
	int conn_id;
	int i;

	/* Create one adapter with eth queues mapped to event queue(s) */

	if (em_conf->nb_eventdev == 0) {
		EH_LOG_ERR("No event devs registered");
		return -EINVAL;
	}

	/* Get the number of eth devs */
	nb_eth_dev = rte_eth_dev_count_avail();

	/* Use the first event dev */
	eventdev_config = &(em_conf->eventdev_config[0]);

	/* Get eventdev ID */
	eventdev_id = eventdev_config->eventdev_id;
	adapter_id = 0;

	/* Get adapter conf */
	adapter = &(em_conf->rx_adapter[adapter_id]);

	/* Set adapter conf */
	adapter->eventdev_id = eventdev_id;
	adapter->adapter_id = adapter_id;

	/*
	 * If event device does not have internal ports for passing
	 * packets then reserved one queue for Tx path
	 */
	nb_eventqueue = eventdev_config->all_internal_ports ?
			eventdev_config->nb_eventqueue :
			eventdev_config->nb_eventqueue - 1;

	/*
	 * Map all queues of eth device (port) to an event queue. If there
	 * are more event queues than eth ports then create 1:1 mapping.
	 * Otherwise map all eth ports to a single event queue.
	 */
	if (nb_eth_dev > nb_eventqueue)
		single_ev_queue = true;

	for (i = 0; i < nb_eth_dev; i++) {

		/* Use only the ports enabled */
		if ((em_conf->eth_portmask & (1 << i)) == 0)
			continue;

		/* Get the connection id */
		conn_id = adapter->nb_connections;

		/* Get the connection */
		conn = &(adapter->conn[conn_id]);

		/* Set mapping between eth ports & event queues*/
		conn->ethdev_id = i;
		conn->eventq_id = single_ev_queue ? 0 : i;

		/* -1 means: add all Rx queues of this eth port */
		conn->ethdev_rx_qid = -1;

		/* Get Rx adapter capabilities */
		rte_event_eth_rx_adapter_caps_get(eventdev_id, i, &caps);
		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
			rx_internal_port = false;

		/* Update no of connections */
		adapter->nb_connections++;

	}

	if (rx_internal_port) {
		/* Rx core is not required */
		adapter->rx_core_id = -1;
	} else {
		/* Rx core is required */
		adapter->rx_core_id = eh_get_next_eth_core(em_conf);
	}

	/* We have setup one adapter */
	em_conf->nb_rx_adapter = 1;

	return 0;
}
410
/*
 * Build a default Tx adapter configuration when the user supplied none:
 * one adapter on the first event device with one connection per enabled
 * eth port (all Tx queues of each port). A Tx service core plus a
 * dedicated Tx event queue (the last one) are reserved only if some eth
 * port lacks the INTERNAL_PORT capability.
 *
 * Returns 0 on success, a negative errno-style value otherwise.
 */
static int
eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
{
	struct tx_adapter_connection_info *conn;
	struct eventdev_params *eventdev_config;
	struct tx_adapter_conf *tx_adapter;
	bool tx_internal_port = true;
	uint32_t caps = 0;
	int eventdev_id;
	int adapter_id;
	int nb_eth_dev;
	int conn_id;
	int i;

	/*
	 * Create one Tx adapter with all eth queues mapped to event queues
	 * 1:1.
	 */

	if (em_conf->nb_eventdev == 0) {
		EH_LOG_ERR("No event devs registered");
		return -EINVAL;
	}

	/* Get the number of eth devs */
	nb_eth_dev = rte_eth_dev_count_avail();

	/* Use the first event dev */
	eventdev_config = &(em_conf->eventdev_config[0]);

	/* Get eventdev ID */
	eventdev_id = eventdev_config->eventdev_id;
	adapter_id = 0;

	/* Get adapter conf */
	tx_adapter = &(em_conf->tx_adapter[adapter_id]);

	/* Set adapter conf */
	tx_adapter->eventdev_id = eventdev_id;
	tx_adapter->adapter_id = adapter_id;

	/*
	 * Map all Tx queues of the eth device (port) to the event device.
	 */

	/* Set defaults for connections */

	/*
	 * One eth device (port) is one connection. Map all Tx queues
	 * of the device to the Tx adapter.
	 */

	for (i = 0; i < nb_eth_dev; i++) {

		/* Use only the ports enabled */
		if ((em_conf->eth_portmask & (1 << i)) == 0)
			continue;

		/* Get the connection id */
		conn_id = tx_adapter->nb_connections;

		/* Get the connection */
		conn = &(tx_adapter->conn[conn_id]);

		/* Add ethdev to connections */
		conn->ethdev_id = i;

		/* -1 means: add all Tx queues of this eth port */
		conn->ethdev_tx_qid = -1;

		/* Get Tx adapter capabilities */
		rte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);
		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			tx_internal_port = false;

		/* Update no of connections */
		tx_adapter->nb_connections++;
	}

	if (tx_internal_port) {
		/* Tx core is not required */
		tx_adapter->tx_core_id = -1;
	} else {
		/* Tx core is required */
		tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);

		/*
		 * Use one event queue per adapter for submitting packets
		 * for Tx. Reserving the last queue available
		 */
		/* Queue numbers start at 0 */
		tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;
	}

	/* We have setup one adapter */
	em_conf->nb_tx_adapter = 1;
	return 0;
}
509
510 static int
511 eh_validate_conf(struct eventmode_conf *em_conf)
512 {
513         int ret;
514
515         /*
516          * Check if event devs are specified. Else probe the event devices
517          * and initialize the config with all ports & queues available
518          */
519         if (em_conf->nb_eventdev == 0) {
520                 ret = eh_set_default_conf_eventdev(em_conf);
521                 if (ret != 0)
522                         return ret;
523         }
524
525         /* Perform capability check for the selected event devices */
526         eh_do_capability_check(em_conf);
527
528         /*
529          * Check if links are specified. Else generate a default config for
530          * the event ports used.
531          */
532         if (em_conf->nb_link == 0) {
533                 ret = eh_set_default_conf_link(em_conf);
534                 if (ret != 0)
535                         return ret;
536         }
537
538         /*
539          * Check if rx adapters are specified. Else generate a default config
540          * with one rx adapter and all eth queues - event queue mapped.
541          */
542         if (em_conf->nb_rx_adapter == 0) {
543                 ret = eh_set_default_conf_rx_adapter(em_conf);
544                 if (ret != 0)
545                         return ret;
546         }
547
548         /*
549          * Check if tx adapters are specified. Else generate a default config
550          * with one tx adapter.
551          */
552         if (em_conf->nb_tx_adapter == 0) {
553                 ret = eh_set_default_conf_tx_adapter(em_conf);
554                 if (ret != 0)
555                         return ret;
556         }
557
558         return 0;
559 }
560
/*
 * Configure and start every event device described in em_conf:
 * per device, apply the device config, set up all event queues (the last
 * queue becomes an ATOMIC Tx queue when the device lacks internal ports)
 * and all event ports; then establish every queue->port link; finally
 * start the devices. Order matters: queues/ports must be set up before
 * linking, and linking before start.
 *
 * Returns 0 on success, the first failing rte_* call's error otherwise.
 */
static int
eh_initialize_eventdev(struct eventmode_conf *em_conf)
{
	struct rte_event_queue_conf eventq_conf = {0};
	struct rte_event_dev_info evdev_default_conf;
	struct rte_event_dev_config eventdev_conf;
	struct eventdev_params *eventdev_config;
	int nb_eventdev = em_conf->nb_eventdev;
	struct eh_event_link_info *link;
	uint8_t *queue = NULL;
	uint8_t eventdev_id;
	int nb_eventqueue;
	uint8_t i, j;
	int ret;

	for (i = 0; i < nb_eventdev; i++) {

		/* Get eventdev config */
		eventdev_config = &(em_conf->eventdev_config[i]);

		/* Get event dev ID */
		eventdev_id = eventdev_config->eventdev_id;

		/* Get the number of queues */
		nb_eventqueue = eventdev_config->nb_eventqueue;

		/* Reset the default conf */
		memset(&evdev_default_conf, 0,
			sizeof(struct rte_event_dev_info));

		/* Get default conf of eventdev */
		ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
		if (ret < 0) {
			EH_LOG_ERR(
				"Error in getting event device info[devID:%d]",
				eventdev_id);
			return ret;
		}

		/* Use the device maxima reported by the driver as limits */
		memset(&eventdev_conf, 0, sizeof(struct rte_event_dev_config));
		eventdev_conf.nb_events_limit =
				evdev_default_conf.max_num_events;
		eventdev_conf.nb_event_queues = nb_eventqueue;
		eventdev_conf.nb_event_ports =
				eventdev_config->nb_eventport;
		eventdev_conf.nb_event_queue_flows =
				evdev_default_conf.max_event_queue_flows;
		eventdev_conf.nb_event_port_dequeue_depth =
				evdev_default_conf.max_event_port_dequeue_depth;
		eventdev_conf.nb_event_port_enqueue_depth =
				evdev_default_conf.max_event_port_enqueue_depth;

		/* Configure event device */
		ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
		if (ret < 0) {
			EH_LOG_ERR("Error in configuring event device");
			return ret;
		}

		/* Configure event queues */
		for (j = 0; j < nb_eventqueue; j++) {

			memset(&eventq_conf, 0,
					sizeof(struct rte_event_queue_conf));

			/* Per event dev queues can be ATQ or SINGLE LINK */
			eventq_conf.event_queue_cfg =
					eventdev_config->ev_queue_mode;
			/*
			 * All queues need to be set with sched_type as
			 * schedule type for the application stage. One
			 * queue would be reserved for the final eth tx
			 * stage if event device does not have internal
			 * ports. This will be an atomic queue.
			 */
			if (!eventdev_config->all_internal_ports &&
			    j == nb_eventqueue-1) {
				eventq_conf.schedule_type =
					RTE_SCHED_TYPE_ATOMIC;
			} else {
				eventq_conf.schedule_type =
					em_conf->ext_params.sched_type;
			}

			/* Set max atomic flows to 1024 */
			eventq_conf.nb_atomic_flows = 1024;
			eventq_conf.nb_atomic_order_sequences = 1024;

			/* Setup the queue */
			ret = rte_event_queue_setup(eventdev_id, j,
					&eventq_conf);
			if (ret < 0) {
				EH_LOG_ERR("Failed to setup event queue %d",
					   ret);
				return ret;
			}
		}

		/* Configure event ports (NULL: driver default port conf) */
		for (j = 0; j <  eventdev_config->nb_eventport; j++) {
			ret = rte_event_port_setup(eventdev_id, j, NULL);
			if (ret < 0) {
				EH_LOG_ERR("Failed to setup event port %d",
					   ret);
				return ret;
			}
		}
	}

	/* Make event queue - event port link */
	for (j = 0; j <  em_conf->nb_link; j++) {

		/* Get link info */
		link = &(em_conf->link[j]);

		/* Get event dev ID */
		eventdev_id = link->eventdev_id;

		/*
		 * If "all_ev_queue_to_ev_port" params flag is selected, all
		 * queues need to be mapped to the port.
		 */
		if (em_conf->ext_params.all_ev_queue_to_ev_port)
			queue = NULL;
		else
			queue = &(link->eventq_id);

		/* Link queue to port */
		ret = rte_event_port_link(eventdev_id, link->event_port_id,
				queue, NULL, 1);
		if (ret < 0) {
			EH_LOG_ERR("Failed to link event port %d", ret);
			return ret;
		}
	}

	/* Start event devices */
	for (i = 0; i < nb_eventdev; i++) {

		/* Get eventdev config */
		eventdev_config = &(em_conf->eventdev_config[i]);

		ret = rte_event_dev_start(eventdev_config->eventdev_id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to start event device %d, %d",
				   i, ret);
			return ret;
		}
	}
	return 0;
}
712
713 static int
714 eh_rx_adapter_configure(struct eventmode_conf *em_conf,
715                 struct rx_adapter_conf *adapter)
716 {
717         struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
718         struct rte_event_dev_info evdev_default_conf = {0};
719         struct rte_event_port_conf port_conf = {0};
720         struct rx_adapter_connection_info *conn;
721         uint8_t eventdev_id;
722         uint32_t service_id;
723         int ret;
724         int j;
725
726         /* Get event dev ID */
727         eventdev_id = adapter->eventdev_id;
728
729         /* Get default configuration of event dev */
730         ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
731         if (ret < 0) {
732                 EH_LOG_ERR("Failed to get event dev info %d", ret);
733                 return ret;
734         }
735
736         /* Setup port conf */
737         port_conf.new_event_threshold = 1200;
738         port_conf.dequeue_depth =
739                         evdev_default_conf.max_event_port_dequeue_depth;
740         port_conf.enqueue_depth =
741                         evdev_default_conf.max_event_port_enqueue_depth;
742
743         /* Create Rx adapter */
744         ret = rte_event_eth_rx_adapter_create(adapter->adapter_id,
745                         adapter->eventdev_id, &port_conf);
746         if (ret < 0) {
747                 EH_LOG_ERR("Failed to create rx adapter %d", ret);
748                 return ret;
749         }
750
751         /* Setup various connections in the adapter */
752         for (j = 0; j < adapter->nb_connections; j++) {
753                 /* Get connection */
754                 conn = &(adapter->conn[j]);
755
756                 /* Setup queue conf */
757                 queue_conf.ev.queue_id = conn->eventq_id;
758                 queue_conf.ev.sched_type = em_conf->ext_params.sched_type;
759                 queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;
760
761                 /* Add queue to the adapter */
762                 ret = rte_event_eth_rx_adapter_queue_add(adapter->adapter_id,
763                                 conn->ethdev_id, conn->ethdev_rx_qid,
764                                 &queue_conf);
765                 if (ret < 0) {
766                         EH_LOG_ERR("Failed to add eth queue to rx adapter %d",
767                                    ret);
768                         return ret;
769                 }
770         }
771
772         /* Get the service ID used by rx adapter */
773         ret = rte_event_eth_rx_adapter_service_id_get(adapter->adapter_id,
774                                                       &service_id);
775         if (ret != -ESRCH && ret < 0) {
776                 EH_LOG_ERR("Failed to get service id used by rx adapter %d",
777                            ret);
778                 return ret;
779         }
780
781         rte_service_set_runstate_mapped_check(service_id, 0);
782
783         /* Start adapter */
784         ret = rte_event_eth_rx_adapter_start(adapter->adapter_id);
785         if (ret < 0) {
786                 EH_LOG_ERR("Failed to start rx adapter %d", ret);
787                 return ret;
788         }
789
790         return 0;
791 }
792
793 static int
794 eh_initialize_rx_adapter(struct eventmode_conf *em_conf)
795 {
796         struct rx_adapter_conf *adapter;
797         int i, ret;
798
799         /* Configure rx adapters */
800         for (i = 0; i < em_conf->nb_rx_adapter; i++) {
801                 adapter = &(em_conf->rx_adapter[i]);
802                 ret = eh_rx_adapter_configure(em_conf, adapter);
803                 if (ret < 0) {
804                         EH_LOG_ERR("Failed to configure rx adapter %d", ret);
805                         return ret;
806                 }
807         }
808         return 0;
809 }
810
811 static int32_t
812 eh_start_worker_eth_core(struct eventmode_conf *conf, uint32_t lcore_id)
813 {
814         uint32_t service_id[EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE];
815         struct rx_adapter_conf *rx_adapter;
816         struct tx_adapter_conf *tx_adapter;
817         int service_count = 0;
818         int adapter_id;
819         int32_t ret;
820         int i;
821
822         EH_LOG_INFO("Entering eth_core processing on lcore %u", lcore_id);
823
824         /*
825          * Parse adapter config to check which of all Rx adapters need
826          * to be handled by this core.
827          */
828         for (i = 0; i < conf->nb_rx_adapter; i++) {
829                 /* Check if we have exceeded the max allowed */
830                 if (service_count > EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE) {
831                         EH_LOG_ERR(
832                               "Exceeded the max allowed adapters per rx core");
833                         break;
834                 }
835
836                 rx_adapter = &(conf->rx_adapter[i]);
837                 if (rx_adapter->rx_core_id != lcore_id)
838                         continue;
839
840                 /* Adapter is handled by this core */
841                 adapter_id = rx_adapter->adapter_id;
842
843                 /* Get the service ID for the adapters */
844                 ret = rte_event_eth_rx_adapter_service_id_get(adapter_id,
845                                 &(service_id[service_count]));
846
847                 if (ret != -ESRCH && ret < 0) {
848                         EH_LOG_ERR(
849                                 "Failed to get service id used by rx adapter");
850                         return ret;
851                 }
852
853                 /* Update service count */
854                 service_count++;
855         }
856
857         /*
858          * Parse adapter config to see which of all Tx adapters need
859          * to be handled by this core.
860          */
861         for (i = 0; i < conf->nb_tx_adapter; i++) {
862                 /* Check if we have exceeded the max allowed */
863                 if (service_count > EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE) {
864                         EH_LOG_ERR(
865                                 "Exceeded the max allowed adapters per tx core");
866                         break;
867                 }
868
869                 tx_adapter = &conf->tx_adapter[i];
870                 if (tx_adapter->tx_core_id != lcore_id)
871                         continue;
872
873                 /* Adapter is handled by this core */
874                 adapter_id = tx_adapter->adapter_id;
875
876                 /* Get the service ID for the adapters */
877                 ret = rte_event_eth_tx_adapter_service_id_get(adapter_id,
878                                 &(service_id[service_count]));
879
880                 if (ret != -ESRCH && ret < 0) {
881                         EH_LOG_ERR(
882                                 "Failed to get service id used by tx adapter");
883                         return ret;
884                 }
885
886                 /* Update service count */
887                 service_count++;
888         }
889
890         eth_core_running = true;
891
892         while (eth_core_running) {
893                 for (i = 0; i < service_count; i++) {
894                         /* Initiate adapter service */
895                         rte_service_run_iter_on_app_lcore(service_id[i], 0);
896                 }
897         }
898
899         return 0;
900 }
901
902 static int32_t
903 eh_stop_worker_eth_core(void)
904 {
905         if (eth_core_running) {
906                 EH_LOG_INFO("Stopping eth cores");
907                 eth_core_running = false;
908         }
909         return 0;
910 }
911
912 static struct eh_app_worker_params *
913 eh_find_worker(uint32_t lcore_id, struct eh_conf *conf,
914                 struct eh_app_worker_params *app_wrkrs, uint8_t nb_wrkr_param)
915 {
916         struct eh_app_worker_params curr_conf = { {{0} }, NULL};
917         struct eh_event_link_info *link = NULL;
918         struct eh_app_worker_params *tmp_wrkr;
919         struct eventmode_conf *em_conf;
920         uint8_t eventdev_id;
921         int i;
922
923         /* Get eventmode config */
924         em_conf = conf->mode_params;
925
926         /*
927          * Use event device from the first lcore-event link.
928          *
929          * Assumption: All lcore-event links tied to a core are using the
930          * same event device. In other words, one core would be polling on
931          * queues of a single event device only.
932          */
933
934         /* Get a link for this lcore */
935         for (i = 0; i < em_conf->nb_link; i++) {
936                 link = &(em_conf->link[i]);
937                 if (link->lcore_id == lcore_id)
938                         break;
939         }
940
941         if (link == NULL) {
942                 EH_LOG_ERR("No valid link found for lcore %d", lcore_id);
943                 return NULL;
944         }
945
946         /* Get event dev ID */
947         eventdev_id = link->eventdev_id;
948
949         /* Populate the curr_conf with the capabilities */
950
951         /* Check for Tx internal port */
952         if (eh_dev_has_tx_internal_port(eventdev_id))
953                 curr_conf.cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
954         else
955                 curr_conf.cap.tx_internal_port = EH_TX_TYPE_NO_INTERNAL_PORT;
956
957         /* Check for burst mode */
958         if (eh_dev_has_burst_mode(eventdev_id))
959                 curr_conf.cap.burst = EH_RX_TYPE_BURST;
960         else
961                 curr_conf.cap.burst = EH_RX_TYPE_NON_BURST;
962
963         curr_conf.cap.ipsec_mode = conf->ipsec_mode;
964
965         /* Parse the passed list and see if we have matching capabilities */
966
967         /* Initialize the pointer used to traverse the list */
968         tmp_wrkr = app_wrkrs;
969
970         for (i = 0; i < nb_wrkr_param; i++, tmp_wrkr++) {
971
972                 /* Skip this if capabilities are not matching */
973                 if (tmp_wrkr->cap.u64 != curr_conf.cap.u64)
974                         continue;
975
976                 /* If the checks pass, we have a match */
977                 return tmp_wrkr;
978         }
979
980         return NULL;
981 }
982
983 static int
984 eh_verify_match_worker(struct eh_app_worker_params *match_wrkr)
985 {
986         /* Verify registered worker */
987         if (match_wrkr->worker_thread == NULL) {
988                 EH_LOG_ERR("No worker registered");
989                 return 0;
990         }
991
992         /* Success */
993         return 1;
994 }
995
996 static uint8_t
997 eh_get_event_lcore_links(uint32_t lcore_id, struct eh_conf *conf,
998                 struct eh_event_link_info **links)
999 {
1000         struct eh_event_link_info *link_cache;
1001         struct eventmode_conf *em_conf = NULL;
1002         struct eh_event_link_info *link;
1003         uint8_t lcore_nb_link = 0;
1004         size_t single_link_size;
1005         size_t cache_size;
1006         int index = 0;
1007         int i;
1008
1009         if (conf == NULL || links == NULL) {
1010                 EH_LOG_ERR("Invalid args");
1011                 return -EINVAL;
1012         }
1013
1014         /* Get eventmode conf */
1015         em_conf = conf->mode_params;
1016
1017         if (em_conf == NULL) {
1018                 EH_LOG_ERR("Invalid event mode parameters");
1019                 return -EINVAL;
1020         }
1021
1022         /* Get the number of links registered */
1023         for (i = 0; i < em_conf->nb_link; i++) {
1024
1025                 /* Get link */
1026                 link = &(em_conf->link[i]);
1027
1028                 /* Check if we have link intended for this lcore */
1029                 if (link->lcore_id == lcore_id) {
1030
1031                         /* Update the number of links for this core */
1032                         lcore_nb_link++;
1033
1034                 }
1035         }
1036
1037         /* Compute size of one entry to be copied */
1038         single_link_size = sizeof(struct eh_event_link_info);
1039
1040         /* Compute size of the buffer required */
1041         cache_size = lcore_nb_link * sizeof(struct eh_event_link_info);
1042
1043         /* Compute size of the buffer required */
1044         link_cache = calloc(1, cache_size);
1045
1046         /* Get the number of links registered */
1047         for (i = 0; i < em_conf->nb_link; i++) {
1048
1049                 /* Get link */
1050                 link = &(em_conf->link[i]);
1051
1052                 /* Check if we have link intended for this lcore */
1053                 if (link->lcore_id == lcore_id) {
1054
1055                         /* Cache the link */
1056                         memcpy(&link_cache[index], link, single_link_size);
1057
1058                         /* Update index */
1059                         index++;
1060                 }
1061         }
1062
1063         /* Update the links for application to use the cached links */
1064         *links = link_cache;
1065
1066         /* Return the number of cached links */
1067         return lcore_nb_link;
1068 }
1069
1070 static int
1071 eh_tx_adapter_configure(struct eventmode_conf *em_conf,
1072                 struct tx_adapter_conf *adapter)
1073 {
1074         struct rte_event_dev_info evdev_default_conf = {0};
1075         struct rte_event_port_conf port_conf = {0};
1076         struct tx_adapter_connection_info *conn;
1077         struct eventdev_params *eventdev_config;
1078         uint8_t tx_port_id = 0;
1079         uint8_t eventdev_id;
1080         uint32_t service_id;
1081         int ret, j;
1082
1083         /* Get event dev ID */
1084         eventdev_id = adapter->eventdev_id;
1085
1086         /* Get event device conf */
1087         eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);
1088
1089         /* Create Tx adapter */
1090
1091         /* Get default configuration of event dev */
1092         ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
1093         if (ret < 0) {
1094                 EH_LOG_ERR("Failed to get event dev info %d", ret);
1095                 return ret;
1096         }
1097
1098         /* Setup port conf */
1099         port_conf.new_event_threshold =
1100                         evdev_default_conf.max_num_events;
1101         port_conf.dequeue_depth =
1102                         evdev_default_conf.max_event_port_dequeue_depth;
1103         port_conf.enqueue_depth =
1104                         evdev_default_conf.max_event_port_enqueue_depth;
1105
1106         /* Create adapter */
1107         ret = rte_event_eth_tx_adapter_create(adapter->adapter_id,
1108                         adapter->eventdev_id, &port_conf);
1109         if (ret < 0) {
1110                 EH_LOG_ERR("Failed to create tx adapter %d", ret);
1111                 return ret;
1112         }
1113
1114         /* Setup various connections in the adapter */
1115         for (j = 0; j < adapter->nb_connections; j++) {
1116
1117                 /* Get connection */
1118                 conn = &(adapter->conn[j]);
1119
1120                 /* Add queue to the adapter */
1121                 ret = rte_event_eth_tx_adapter_queue_add(adapter->adapter_id,
1122                                 conn->ethdev_id, conn->ethdev_tx_qid);
1123                 if (ret < 0) {
1124                         EH_LOG_ERR("Failed to add eth queue to tx adapter %d",
1125                                    ret);
1126                         return ret;
1127                 }
1128         }
1129
1130         /*
1131          * Check if Tx core is assigned. If Tx core is not assigned then
1132          * the adapter has internal port for submitting Tx packets and
1133          * Tx event queue & port setup is not required
1134          */
1135         if (adapter->tx_core_id == (uint32_t) (-1)) {
1136                 /* Internal port is present */
1137                 goto skip_tx_queue_port_setup;
1138         }
1139
1140         /* Setup Tx queue & port */
1141
1142         /* Get event port used by the adapter */
1143         ret = rte_event_eth_tx_adapter_event_port_get(
1144                         adapter->adapter_id, &tx_port_id);
1145         if (ret) {
1146                 EH_LOG_ERR("Failed to get tx adapter port id %d", ret);
1147                 return ret;
1148         }
1149
1150         /*
1151          * Tx event queue is reserved for Tx adapter. Unlink this queue
1152          * from all other ports
1153          *
1154          */
1155         for (j = 0; j < eventdev_config->nb_eventport; j++) {
1156                 rte_event_port_unlink(eventdev_id, j,
1157                                       &(adapter->tx_ev_queue), 1);
1158         }
1159
1160         /* Link Tx event queue to Tx port */
1161         ret = rte_event_port_link(eventdev_id, tx_port_id,
1162                         &(adapter->tx_ev_queue), NULL, 1);
1163         if (ret != 1) {
1164                 EH_LOG_ERR("Failed to link event queue to port");
1165                 return ret;
1166         }
1167
1168         /* Get the service ID used by Tx adapter */
1169         ret = rte_event_eth_tx_adapter_service_id_get(adapter->adapter_id,
1170                                                       &service_id);
1171         if (ret != -ESRCH && ret < 0) {
1172                 EH_LOG_ERR("Failed to get service id used by tx adapter %d",
1173                            ret);
1174                 return ret;
1175         }
1176
1177         rte_service_set_runstate_mapped_check(service_id, 0);
1178
1179 skip_tx_queue_port_setup:
1180         /* Start adapter */
1181         ret = rte_event_eth_tx_adapter_start(adapter->adapter_id);
1182         if (ret < 0) {
1183                 EH_LOG_ERR("Failed to start tx adapter %d", ret);
1184                 return ret;
1185         }
1186
1187         return 0;
1188 }
1189
1190 static int
1191 eh_initialize_tx_adapter(struct eventmode_conf *em_conf)
1192 {
1193         struct tx_adapter_conf *adapter;
1194         int i, ret;
1195
1196         /* Configure Tx adapters */
1197         for (i = 0; i < em_conf->nb_tx_adapter; i++) {
1198                 adapter = &(em_conf->tx_adapter[i]);
1199                 ret = eh_tx_adapter_configure(em_conf, adapter);
1200                 if (ret < 0) {
1201                         EH_LOG_ERR("Failed to configure tx adapter %d", ret);
1202                         return ret;
1203                 }
1204         }
1205         return 0;
1206 }
1207
1208 static void
1209 eh_display_operating_mode(struct eventmode_conf *em_conf)
1210 {
1211         char sched_types[][32] = {
1212                 "RTE_SCHED_TYPE_ORDERED",
1213                 "RTE_SCHED_TYPE_ATOMIC",
1214                 "RTE_SCHED_TYPE_PARALLEL",
1215         };
1216         EH_LOG_INFO("Operating mode:");
1217
1218         EH_LOG_INFO("\tScheduling type: \t%s",
1219                 sched_types[em_conf->ext_params.sched_type]);
1220
1221         EH_LOG_INFO("");
1222 }
1223
1224 static void
1225 eh_display_event_dev_conf(struct eventmode_conf *em_conf)
1226 {
1227         char queue_mode[][32] = {
1228                 "",
1229                 "ATQ (ALL TYPE QUEUE)",
1230                 "SINGLE LINK",
1231         };
1232         char print_buf[256] = { 0 };
1233         int i;
1234
1235         EH_LOG_INFO("Event Device Configuration:");
1236
1237         for (i = 0; i < em_conf->nb_eventdev; i++) {
1238                 sprintf(print_buf,
1239                         "\tDev ID: %-2d \tQueues: %-2d \tPorts: %-2d",
1240                         em_conf->eventdev_config[i].eventdev_id,
1241                         em_conf->eventdev_config[i].nb_eventqueue,
1242                         em_conf->eventdev_config[i].nb_eventport);
1243                 sprintf(print_buf + strlen(print_buf),
1244                         "\tQueue mode: %s",
1245                         queue_mode[em_conf->eventdev_config[i].ev_queue_mode]);
1246                 EH_LOG_INFO("%s", print_buf);
1247         }
1248         EH_LOG_INFO("");
1249 }
1250
1251 static void
1252 eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)
1253 {
1254         int nb_rx_adapter = em_conf->nb_rx_adapter;
1255         struct rx_adapter_connection_info *conn;
1256         struct rx_adapter_conf *adapter;
1257         char print_buf[256] = { 0 };
1258         int i, j;
1259
1260         EH_LOG_INFO("Rx adapters configured: %d", nb_rx_adapter);
1261
1262         for (i = 0; i < nb_rx_adapter; i++) {
1263                 adapter = &(em_conf->rx_adapter[i]);
1264                 sprintf(print_buf,
1265                         "\tRx adaper ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
1266                         adapter->adapter_id,
1267                         adapter->nb_connections,
1268                         adapter->eventdev_id);
1269                 if (adapter->rx_core_id == (uint32_t)-1)
1270                         sprintf(print_buf + strlen(print_buf),
1271                                 "\tRx core: %-2s", "[INTERNAL PORT]");
1272                 else if (adapter->rx_core_id == RTE_MAX_LCORE)
1273                         sprintf(print_buf + strlen(print_buf),
1274                                 "\tRx core: %-2s", "[NONE]");
1275                 else
1276                         sprintf(print_buf + strlen(print_buf),
1277                                 "\tRx core: %-2d", adapter->rx_core_id);
1278
1279                 EH_LOG_INFO("%s", print_buf);
1280
1281                 for (j = 0; j < adapter->nb_connections; j++) {
1282                         conn = &(adapter->conn[j]);
1283
1284                         sprintf(print_buf,
1285                                 "\t\tEthdev ID: %-2d", conn->ethdev_id);
1286
1287                         if (conn->ethdev_rx_qid == -1)
1288                                 sprintf(print_buf + strlen(print_buf),
1289                                         "\tEth rx queue: %-2s", "ALL");
1290                         else
1291                                 sprintf(print_buf + strlen(print_buf),
1292                                         "\tEth rx queue: %-2d",
1293                                         conn->ethdev_rx_qid);
1294
1295                         sprintf(print_buf + strlen(print_buf),
1296                                 "\tEvent queue: %-2d", conn->eventq_id);
1297                         EH_LOG_INFO("%s", print_buf);
1298                 }
1299         }
1300         EH_LOG_INFO("");
1301 }
1302
1303 static void
1304 eh_display_tx_adapter_conf(struct eventmode_conf *em_conf)
1305 {
1306         int nb_tx_adapter = em_conf->nb_tx_adapter;
1307         struct tx_adapter_connection_info *conn;
1308         struct tx_adapter_conf *adapter;
1309         char print_buf[256] = { 0 };
1310         int i, j;
1311
1312         EH_LOG_INFO("Tx adapters configured: %d", nb_tx_adapter);
1313
1314         for (i = 0; i < nb_tx_adapter; i++) {
1315                 adapter = &(em_conf->tx_adapter[i]);
1316                 sprintf(print_buf,
1317                         "\tTx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
1318                         adapter->adapter_id,
1319                         adapter->nb_connections,
1320                         adapter->eventdev_id);
1321                 if (adapter->tx_core_id == (uint32_t)-1)
1322                         sprintf(print_buf + strlen(print_buf),
1323                                 "\tTx core: %-2s", "[INTERNAL PORT]");
1324                 else if (adapter->tx_core_id == RTE_MAX_LCORE)
1325                         sprintf(print_buf + strlen(print_buf),
1326                                 "\tTx core: %-2s", "[NONE]");
1327                 else
1328                         sprintf(print_buf + strlen(print_buf),
1329                                 "\tTx core: %-2d,\tInput event queue: %-2d",
1330                                 adapter->tx_core_id, adapter->tx_ev_queue);
1331
1332                 EH_LOG_INFO("%s", print_buf);
1333
1334                 for (j = 0; j < adapter->nb_connections; j++) {
1335                         conn = &(adapter->conn[j]);
1336
1337                         sprintf(print_buf,
1338                                 "\t\tEthdev ID: %-2d", conn->ethdev_id);
1339
1340                         if (conn->ethdev_tx_qid == -1)
1341                                 sprintf(print_buf + strlen(print_buf),
1342                                         "\tEth tx queue: %-2s", "ALL");
1343                         else
1344                                 sprintf(print_buf + strlen(print_buf),
1345                                         "\tEth tx queue: %-2d",
1346                                         conn->ethdev_tx_qid);
1347                         EH_LOG_INFO("%s", print_buf);
1348                 }
1349         }
1350         EH_LOG_INFO("");
1351 }
1352
1353 static void
1354 eh_display_link_conf(struct eventmode_conf *em_conf)
1355 {
1356         struct eh_event_link_info *link;
1357         char print_buf[256] = { 0 };
1358         int i;
1359
1360         EH_LOG_INFO("Links configured: %d", em_conf->nb_link);
1361
1362         for (i = 0; i < em_conf->nb_link; i++) {
1363                 link = &(em_conf->link[i]);
1364
1365                 sprintf(print_buf,
1366                         "\tEvent dev ID: %-2d\tEvent port: %-2d",
1367                         link->eventdev_id,
1368                         link->event_port_id);
1369
1370                 if (em_conf->ext_params.all_ev_queue_to_ev_port)
1371                         sprintf(print_buf + strlen(print_buf),
1372                                 "Event queue: %-2s\t", "ALL");
1373                 else
1374                         sprintf(print_buf + strlen(print_buf),
1375                                 "Event queue: %-2d\t", link->eventq_id);
1376
1377                 sprintf(print_buf + strlen(print_buf),
1378                         "Lcore: %-2d", link->lcore_id);
1379                 EH_LOG_INFO("%s", print_buf);
1380         }
1381         EH_LOG_INFO("");
1382 }
1383
/*
 * Allocate an event helper configuration populated with defaults:
 * poll-mode packet transfer, app-mode IPsec, all eth ports enabled, the
 * scheduling type left unset, and two non-master lcores pre-marked as
 * eth (Rx/Tx) cores in the eth core bitmap.
 * Returns the new conf, or NULL on allocation/initialization failure.
 * Ownership: caller releases with eh_conf_uninit().
 */
struct eh_conf *
eh_conf_init(void)
{
	struct eventmode_conf *em_conf = NULL;
	struct eh_conf *conf = NULL;
	unsigned int eth_core_id;
	void *bitmap = NULL;
	uint32_t nb_bytes;

	/* Allocate memory for config */
	conf = calloc(1, sizeof(struct eh_conf));
	if (conf == NULL) {
		EH_LOG_ERR("Failed to allocate memory for eventmode helper "
			   "config");
		return NULL;
	}

	/* Set default conf */

	/* Packet transfer mode: poll */
	conf->mode = EH_PKT_TRANSFER_MODE_POLL;
	conf->ipsec_mode = EH_IPSEC_MODE_TYPE_APP;

	/* Keep all ethernet ports enabled by default */
	conf->eth_portmask = -1;

	/* Allocate memory for event mode params */
	conf->mode_params = calloc(1, sizeof(struct eventmode_conf));
	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Failed to allocate memory for event mode params");
		goto free_conf;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/*
	 * Allocate and initialize bitmap for eth cores. The raw bitmap
	 * storage comes from rte_zmalloc and is released via rte_free on
	 * the error path and in eh_conf_uninit().
	 */
	nb_bytes = rte_bitmap_get_memory_footprint(RTE_MAX_LCORE);
	if (!nb_bytes) {
		EH_LOG_ERR("Failed to get bitmap footprint");
		goto free_em_conf;
	}

	bitmap = rte_zmalloc("event-helper-ethcore-bitmap", nb_bytes,
			     RTE_CACHE_LINE_SIZE);
	if (!bitmap) {
		EH_LOG_ERR("Failed to allocate memory for eth cores bitmap\n");
		goto free_em_conf;
	}

	em_conf->eth_core_mask = rte_bitmap_init(RTE_MAX_LCORE, bitmap,
						 nb_bytes);
	if (!em_conf->eth_core_mask) {
		EH_LOG_ERR("Failed to initialize bitmap");
		goto free_bitmap;
	}

	/* Set schedule type as not set */
	em_conf->ext_params.sched_type = SCHED_TYPE_NOT_SET;

	/* Set two cores as eth cores for Rx & Tx */

	/* Use first core other than master core as Rx core */
	eth_core_id = rte_get_next_lcore(0,	/* curr core */
					 1,	/* skip master core */
					 0	/* wrap */);

	rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);

	/* Use next core as Tx core */
	eth_core_id = rte_get_next_lcore(eth_core_id,	/* curr core */
					 1,		/* skip master core */
					 0		/* wrap */);

	rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);

	return conf;

	/* Unwind in reverse order of acquisition */
free_bitmap:
	rte_free(bitmap);
free_em_conf:
	free(em_conf);
free_conf:
	free(conf);
	return NULL;
}
1470
1471 void
1472 eh_conf_uninit(struct eh_conf *conf)
1473 {
1474         struct eventmode_conf *em_conf = NULL;
1475
1476         if (!conf || !conf->mode_params)
1477                 return;
1478
1479         /* Get eventmode conf */
1480         em_conf = conf->mode_params;
1481
1482         /* Free evenmode configuration memory */
1483         rte_free(em_conf->eth_core_mask);
1484         free(em_conf);
1485         free(conf);
1486 }
1487
1488 void
1489 eh_display_conf(struct eh_conf *conf)
1490 {
1491         struct eventmode_conf *em_conf;
1492
1493         if (conf == NULL) {
1494                 EH_LOG_ERR("Invalid event helper configuration");
1495                 return;
1496         }
1497
1498         if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
1499                 return;
1500
1501         if (conf->mode_params == NULL) {
1502                 EH_LOG_ERR("Invalid event mode parameters");
1503                 return;
1504         }
1505
1506         /* Get eventmode conf */
1507         em_conf = (struct eventmode_conf *)(conf->mode_params);
1508
1509         /* Display user exposed operating modes */
1510         eh_display_operating_mode(em_conf);
1511
1512         /* Display event device conf */
1513         eh_display_event_dev_conf(em_conf);
1514
1515         /* Display Rx adapter conf */
1516         eh_display_rx_adapter_conf(em_conf);
1517
1518         /* Display Tx adapter conf */
1519         eh_display_tx_adapter_conf(em_conf);
1520
1521         /* Display event-lcore link */
1522         eh_display_link_conf(em_conf);
1523 }
1524
/*
 * Initialize all devices for event mode: validate the eventmode conf,
 * stop the enabled eth devices, set up the event device and the Rx/Tx
 * adapters, then restart the eth devices.
 * Returns 0 on success (or when conf is not in event mode), negative
 * errno on failure.
 */
int32_t
eh_devs_init(struct eh_conf *conf)
{
	struct eventmode_conf *em_conf;
	uint16_t port_id;
	int ret;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return -EINVAL;
	}

	/* Nothing to do in poll mode */
	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
		return 0;

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return -EINVAL;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Eventmode conf would need eth portmask */
	em_conf->eth_portmask = conf->eth_portmask;

	/* Validate the requested config */
	ret = eh_validate_conf(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to validate the requested config %d", ret);
		return ret;
	}

	/* Display the current configuration */
	eh_display_conf(conf);

	/* Stop eth devices before setting up adapter */
	RTE_ETH_FOREACH_DEV(port_id) {

		/*
		 * Use only the ports enabled in the mask.
		 * NOTE(review): "1 << port_id" is a signed 32-bit shift —
		 * undefined for port_id >= 31; confirm the app bounds the
		 * number of eth ports accordingly.
		 */
		if ((conf->eth_portmask & (1 << port_id)) == 0)
			continue;

		rte_eth_dev_stop(port_id);
	}

	/* Setup eventdev */
	ret = eh_initialize_eventdev(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to initialize event dev %d", ret);
		return ret;
	}

	/* Setup Rx adapter */
	ret = eh_initialize_rx_adapter(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to initialize rx adapter %d", ret);
		return ret;
	}

	/* Setup Tx adapter */
	ret = eh_initialize_tx_adapter(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to initialize tx adapter %d", ret);
		return ret;
	}

	/* Start eth devices after setting up adapter */
	RTE_ETH_FOREACH_DEV(port_id) {

		/* Use only the ports enabled */
		if ((conf->eth_portmask & (1 << port_id)) == 0)
			continue;

		ret = rte_eth_dev_start(port_id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to start eth dev %d, %d",
				   port_id, ret);
			return ret;
		}
	}

	return 0;
}
1609
/*
 * Tear down everything eh_devs_init() set up: stop and free the Rx
 * adapters, stop and close the event devices, then stop and free the
 * Tx adapters.
 * Returns 0 on success (or when conf is not in event mode), negative
 * errno on the first failure.
 *
 * NOTE(review): event devices are closed before the Tx adapters are
 * freed — confirm this ordering is intended, since the Tx adapters
 * reference the event device.
 */
int32_t
eh_devs_uninit(struct eh_conf *conf)
{
	struct eventmode_conf *em_conf;
	int ret, i, j;
	uint16_t id;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return -EINVAL;
	}

	/* Nothing to tear down in poll mode */
	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
		return 0;

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return -EINVAL;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Stop and release rx adapters */
	for (i = 0; i < em_conf->nb_rx_adapter; i++) {

		id = em_conf->rx_adapter[i].adapter_id;
		ret = rte_event_eth_rx_adapter_stop(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to stop rx adapter %d", ret);
			return ret;
		}

		for (j = 0; j < em_conf->rx_adapter[i].nb_connections; j++) {

			/* -1 requests removal of all of the ethdev's queues */
			ret = rte_event_eth_rx_adapter_queue_del(id,
				em_conf->rx_adapter[i].conn[j].ethdev_id, -1);
			if (ret < 0) {
				EH_LOG_ERR(
				       "Failed to remove rx adapter queues %d",
				       ret);
				return ret;
			}
		}

		ret = rte_event_eth_rx_adapter_free(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to free rx adapter %d", ret);
			return ret;
		}
	}

	/* Stop and release event devices */
	for (i = 0; i < em_conf->nb_eventdev; i++) {

		id = em_conf->eventdev_config[i].eventdev_id;
		rte_event_dev_stop(id);

		ret = rte_event_dev_close(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to close event dev %d, %d", id, ret);
			return ret;
		}
	}

	/* Stop and release tx adapters */
	for (i = 0; i < em_conf->nb_tx_adapter; i++) {

		id = em_conf->tx_adapter[i].adapter_id;
		ret = rte_event_eth_tx_adapter_stop(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to stop tx adapter %d", ret);
			return ret;
		}

		for (j = 0; j < em_conf->tx_adapter[i].nb_connections; j++) {

			/* -1 requests removal of all of the ethdev's queues */
			ret = rte_event_eth_tx_adapter_queue_del(id,
				em_conf->tx_adapter[i].conn[j].ethdev_id, -1);
			if (ret < 0) {
				EH_LOG_ERR(
					"Failed to remove tx adapter queues %d",
					ret);
				return ret;
			}
		}

		ret = rte_event_eth_tx_adapter_free(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to free tx adapter %d", ret);
			return ret;
		}
	}

	return 0;
}
1706
1707 void
1708 eh_launch_worker(struct eh_conf *conf, struct eh_app_worker_params *app_wrkr,
1709                 uint8_t nb_wrkr_param)
1710 {
1711         struct eh_app_worker_params *match_wrkr;
1712         struct eh_event_link_info *links = NULL;
1713         struct eventmode_conf *em_conf;
1714         uint32_t lcore_id;
1715         uint8_t nb_links;
1716
1717         if (conf == NULL) {
1718                 EH_LOG_ERR("Invalid event helper configuration");
1719                 return;
1720         }
1721
1722         if (conf->mode_params == NULL) {
1723                 EH_LOG_ERR("Invalid event mode parameters");
1724                 return;
1725         }
1726
1727         /* Get eventmode conf */
1728         em_conf = conf->mode_params;
1729
1730         /* Get core ID */
1731         lcore_id = rte_lcore_id();
1732
1733         /* Check if this is eth core */
1734         if (rte_bitmap_get(em_conf->eth_core_mask, lcore_id)) {
1735                 eh_start_worker_eth_core(em_conf, lcore_id);
1736                 return;
1737         }
1738
1739         if (app_wrkr == NULL || nb_wrkr_param == 0) {
1740                 EH_LOG_ERR("Invalid args");
1741                 return;
1742         }
1743
1744         /*
1745          * This is a regular worker thread. The application registers
1746          * multiple workers with various capabilities. Run worker
1747          * based on the selected capabilities of the event
1748          * device configured.
1749          */
1750
1751         /* Get the first matching worker for the event device */
1752         match_wrkr = eh_find_worker(lcore_id, conf, app_wrkr, nb_wrkr_param);
1753         if (match_wrkr == NULL) {
1754                 EH_LOG_ERR("Failed to match worker registered for lcore %d",
1755                            lcore_id);
1756                 goto clean_and_exit;
1757         }
1758
1759         /* Verify sanity of the matched worker */
1760         if (eh_verify_match_worker(match_wrkr) != 1) {
1761                 EH_LOG_ERR("Failed to validate the matched worker");
1762                 goto clean_and_exit;
1763         }
1764
1765         /* Get worker links */
1766         nb_links = eh_get_event_lcore_links(lcore_id, conf, &links);
1767
1768         /* Launch the worker thread */
1769         match_wrkr->worker_thread(links, nb_links);
1770
1771         /* Free links info memory */
1772         free(links);
1773
1774 clean_and_exit:
1775
1776         /* Flag eth_cores to stop, if started */
1777         eh_stop_worker_eth_core();
1778 }
1779
1780 uint8_t
1781 eh_get_tx_queue(struct eh_conf *conf, uint8_t eventdev_id)
1782 {
1783         struct eventdev_params *eventdev_config;
1784         struct eventmode_conf *em_conf;
1785
1786         if (conf == NULL) {
1787                 EH_LOG_ERR("Invalid event helper configuration");
1788                 return -EINVAL;
1789         }
1790
1791         if (conf->mode_params == NULL) {
1792                 EH_LOG_ERR("Invalid event mode parameters");
1793                 return -EINVAL;
1794         }
1795
1796         /* Get eventmode conf */
1797         em_conf = conf->mode_params;
1798
1799         /* Get event device conf */
1800         eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);
1801
1802         if (eventdev_config == NULL) {
1803                 EH_LOG_ERR("Failed to read eventdev config");
1804                 return -EINVAL;
1805         }
1806
1807         /*
1808          * The last queue is reserved to be used as atomic queue for the
1809          * last stage (eth packet tx stage)
1810          */
1811         return eventdev_config->nb_eventqueue - 1;
1812 }