examples: check stop call status
examples/ipsec-secgw/event_helper.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (C) 2020 Marvell International Ltd.
3  */
4 #include <rte_bitmap.h>
5 #include <rte_ethdev.h>
6 #include <rte_eventdev.h>
7 #include <rte_event_eth_rx_adapter.h>
8 #include <rte_event_eth_tx_adapter.h>
9 #include <rte_malloc.h>
10 #include <stdbool.h>
11
12 #include "event_helper.h"
13
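/*
 * Flag polled by the eth (adapter service) cores. eh_start_worker_eth_core()
 * sets it and loops on it; eh_stop_worker_eth_core() clears it to make those
 * cores exit their service loop.
 */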
14 static volatile bool eth_core_running;
15
16 static int
17 eh_get_enabled_cores(struct rte_bitmap *eth_core_mask)
18 {
19         int i, count = 0;
20
21         RTE_LCORE_FOREACH(i) {
22                 /* Check if this core is enabled in the core mask */
23                 if (rte_bitmap_get(eth_core_mask, i)) {
24                         /* Found enabled core */
25                         count++;
26                 }
27         }
28         return count;
29 }
30
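/*
 * Return the next lcore marked as an eth core in the eth core mask. The
 * previously returned core is remembered in a static variable and the lcore
 * list is walked with wrap enabled, so repeated calls cycle through the eth
 * cores. Used to pick Rx and Tx service cores for the adapters.
 */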
31 static inline unsigned int
32 eh_get_next_eth_core(struct eventmode_conf *em_conf)
33 {
34         static unsigned int prev_core = -1;
35         unsigned int next_core;
36
37         /*
38          * Make sure we have at least one eth core running, else the following
39          * logic would lead to an infinite loop.
40          */
41         if (eh_get_enabled_cores(em_conf->eth_core_mask) == 0) {
42                 EH_LOG_ERR("No enabled eth core found");
43                 return RTE_MAX_LCORE;
44         }
45
46         /* Only some cores are marked as eth cores, skip others */
47         do {
48                 /* Get the next core */
49                 next_core = rte_get_next_lcore(prev_core, 0, 1);
50
51                 /* Check if we have reached max lcores */
52                 if (next_core == RTE_MAX_LCORE)
53                         return next_core;
54
55                 /* Update prev_core */
56                 prev_core = next_core;
57         } while (!(rte_bitmap_get(em_conf->eth_core_mask, next_core)));
58
59         return next_core;
60 }
61
62 static inline unsigned int
63 eh_get_next_active_core(struct eventmode_conf *em_conf, unsigned int prev_core)
64 {
65         unsigned int next_core;
66
67         /* Get next active core skipping cores reserved as eth cores */
68         do {
69                 /* Get the next core */
70                 next_core = rte_get_next_lcore(prev_core, 0, 0);
71
72                 /* Check if we have reached max lcores */
73                 if (next_core == RTE_MAX_LCORE)
74                         return next_core;
75
76                 prev_core = next_core;
77         } while (rte_bitmap_get(em_conf->eth_core_mask, next_core));
78
79         return next_core;
80 }
81
82 static struct eventdev_params *
83 eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id)
84 {
85         int i;
86
87         for (i = 0; i < em_conf->nb_eventdev; i++) {
88                 if (em_conf->eventdev_config[i].eventdev_id == eventdev_id)
89                         break;
90         }
91
92         /* No match */
93         if (i == em_conf->nb_eventdev)
94                 return NULL;
95
96         return &(em_conf->eventdev_config[i]);
97 }
98
99 static inline bool
100 eh_dev_has_rx_internal_port(uint8_t eventdev_id)
101 {
102         bool flag = true;
103         int j, ret;
104
105         RTE_ETH_FOREACH_DEV(j) {
106                 uint32_t caps = 0;
107
108                 ret = rte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps);
109                 if (ret < 0)
110                         return false;
111
112                 if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
113                         flag = false;
114         }
115         return flag;
116 }
117
118 static inline bool
119 eh_dev_has_tx_internal_port(uint8_t eventdev_id)
120 {
121         bool flag = true;
122         int j, ret;
123
124         RTE_ETH_FOREACH_DEV(j) {
125                 uint32_t caps = 0;
126
127                 ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps);
128                 if (ret < 0)
129                         return false;
130
131                 if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
132                         flag = false;
133         }
134         return flag;
135 }
136
137 static inline bool
138 eh_dev_has_burst_mode(uint8_t dev_id)
139 {
140         struct rte_event_dev_info dev_info;
141
142         rte_event_dev_info_get(dev_id, &dev_info);
143         return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
144                         true : false;
145 }
146
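/*
 * Build a default config for the single event device: use all queues and
 * ports reported by the driver, capped at one event queue per eth port (plus
 * one reserved for Tx) and one event port per lcore.
 */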
147 static int
148 eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
149 {
150         int lcore_count, nb_eventdev, nb_eth_dev, ret;
151         struct eventdev_params *eventdev_config;
152         struct rte_event_dev_info dev_info;
153
154         /* Get the number of event devices */
155         nb_eventdev = rte_event_dev_count();
156         if (nb_eventdev == 0) {
157                 EH_LOG_ERR("No event devices detected");
158                 return -EINVAL;
159         }
160
161         if (nb_eventdev != 1) {
162                 EH_LOG_ERR("Event mode does not support multiple event devices. "
163                            "Please provide only one event device.");
164                 return -EINVAL;
165         }
166
167         /* Get the number of eth devs */
168         nb_eth_dev = rte_eth_dev_count_avail();
169         if (nb_eth_dev == 0) {
170                 EH_LOG_ERR("No eth devices detected");
171                 return -EINVAL;
172         }
173
174         /* Get the number of lcores */
175         lcore_count = rte_lcore_count();
176
177         /* Read event device info */
178         ret = rte_event_dev_info_get(0, &dev_info);
179         if (ret < 0) {
180                 EH_LOG_ERR("Failed to read event device info %d", ret);
181                 return ret;
182         }
183
184         /* Check if enough ports are available */
185         if (dev_info.max_event_ports < 2) {
186                 EH_LOG_ERR("Not enough event ports available");
187                 return -EINVAL;
188         }
189
190         /* Get the first event dev conf */
191         eventdev_config = &(em_conf->eventdev_config[0]);
192
193         /* Save number of queues & ports available */
194         eventdev_config->eventdev_id = 0;
195         eventdev_config->nb_eventqueue = dev_info.max_event_queues;
196         eventdev_config->nb_eventport = dev_info.max_event_ports;
197         eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
198
199         /* Check if there are more queues than required */
200         if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
201                 /* One queue is reserved for Tx */
202                 eventdev_config->nb_eventqueue = nb_eth_dev + 1;
203         }
204
205         /* Check if there are more ports than required */
206         if (eventdev_config->nb_eventport > lcore_count) {
207                 /* One port per lcore is enough */
208                 eventdev_config->nb_eventport = lcore_count;
209         }
210
211         /* Update the number of event devices */
212         em_conf->nb_eventdev++;
213
214         return 0;
215 }
216
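/*
 * Probe the Rx/Tx adapter capabilities of each event device. When every
 * device has internal Rx & Tx ports, no eth (service) cores are needed, so
 * the requested eth core mask is cleared and the extra Tx event queue is
 * dropped.
 */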
217 static void
218 eh_do_capability_check(struct eventmode_conf *em_conf)
219 {
220         struct eventdev_params *eventdev_config;
221         int all_internal_ports = 1;
222         uint32_t eventdev_id;
223         int i;
224
225         for (i = 0; i < em_conf->nb_eventdev; i++) {
226
227                 /* Get the event dev conf */
228                 eventdev_config = &(em_conf->eventdev_config[i]);
229                 eventdev_id = eventdev_config->eventdev_id;
230
231                 /* Check if event device has internal port for Rx & Tx */
232                 if (eh_dev_has_rx_internal_port(eventdev_id) &&
233                     eh_dev_has_tx_internal_port(eventdev_id)) {
234                         eventdev_config->all_internal_ports = 1;
235                 } else {
236                         all_internal_ports = 0;
237                 }
238         }
239
240         /*
241          * If Rx & Tx internal ports are supported by all event devices then
242          * eth cores won't be required. Override the eth core mask requested
243          * and decrement number of event queues by one as it won't be needed
244          * for Tx.
245          */
246         if (all_internal_ports) {
247                 rte_bitmap_reset(em_conf->eth_core_mask);
248                 for (i = 0; i < em_conf->nb_eventdev; i++)
249                         em_conf->eventdev_config[i].nb_eventqueue--;
250         }
251 }
252
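/*
 * Build the default event port to lcore links: event ports are assigned to
 * worker lcores one by one (eth cores are skipped), and all event queues are
 * marked to be linked to each port.
 */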
253 static int
254 eh_set_default_conf_link(struct eventmode_conf *em_conf)
255 {
256         struct eventdev_params *eventdev_config;
257         struct eh_event_link_info *link;
258         unsigned int lcore_id = -1;
259         int i, link_index;
260
261         /*
262          * Create a 1:1 mapping from event ports to lcores. If the number
263          * of event ports is less than the number of cores, some cores
264          * won't run a worker. If there are more event ports than cores,
265          * some ports won't be used.
266          *
267          */
268
269         /*
270          * The event queue-port mapping is done according to the links.
271          * Since we are falling back to the default link config, enable
272          * the "all_ev_queue_to_ev_port" mode flag, which maps all event
273          * queues to each event port.
274          */
275         em_conf->ext_params.all_ev_queue_to_ev_port = 1;
276
277         /* Get first event dev conf */
278         eventdev_config = &(em_conf->eventdev_config[0]);
279
280         /* Loop through the ports */
281         for (i = 0; i < eventdev_config->nb_eventport; i++) {
282
283                 /* Get next active core id */
284                 lcore_id = eh_get_next_active_core(em_conf,
285                                 lcore_id);
286
287                 if (lcore_id == RTE_MAX_LCORE) {
288                         /* Reached max cores */
289                         return 0;
290                 }
291
292                 /* Save the current combination as one link */
293
294                 /* Get the index */
295                 link_index = em_conf->nb_link;
296
297                 /* Get the corresponding link */
298                 link = &(em_conf->link[link_index]);
299
300                 /* Save link */
301                 link->eventdev_id = eventdev_config->eventdev_id;
302                 link->event_port_id = i;
303                 link->lcore_id = lcore_id;
304
305                 /*
306                  * Don't set eventq_id as by default all queues
307                  * need to be mapped to the port, which is controlled
308                  * by the operating mode.
309                  */
310
311                 /* Update number of links */
312                 em_conf->nb_link++;
313         }
314
315         return 0;
316 }
317
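/*
 * Build a default Rx adapter config: one adapter on the first event device,
 * with every enabled eth port connected to an event queue (1:1 when enough
 * event queues exist, otherwise all ports feed event queue 0). An Rx service
 * core is assigned only if some port lacks the internal port capability.
 */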
318 static int
319 eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
320 {
321         struct rx_adapter_connection_info *conn;
322         struct eventdev_params *eventdev_config;
323         struct rx_adapter_conf *adapter;
324         bool rx_internal_port = true;
325         bool single_ev_queue = false;
326         int nb_eventqueue;
327         uint32_t caps = 0;
328         int eventdev_id;
329         int nb_eth_dev;
330         int adapter_id;
331         int conn_id;
332         int ret;
333         int i;
334
335         /* Create one adapter with eth queues mapped to event queue(s) */
336
337         if (em_conf->nb_eventdev == 0) {
338                 EH_LOG_ERR("No event devs registered");
339                 return -EINVAL;
340         }
341
342         /* Get the number of eth devs */
343         nb_eth_dev = rte_eth_dev_count_avail();
344
345         /* Use the first event dev */
346         eventdev_config = &(em_conf->eventdev_config[0]);
347
348         /* Get eventdev ID */
349         eventdev_id = eventdev_config->eventdev_id;
350         adapter_id = 0;
351
352         /* Get adapter conf */
353         adapter = &(em_conf->rx_adapter[adapter_id]);
354
355         /* Set adapter conf */
356         adapter->eventdev_id = eventdev_id;
357         adapter->adapter_id = adapter_id;
358
359         /*
360          * If the event device does not have internal ports for passing
361          * packets, reserve one event queue for the Tx path.
362          */
363         nb_eventqueue = eventdev_config->all_internal_ports ?
364                         eventdev_config->nb_eventqueue :
365                         eventdev_config->nb_eventqueue - 1;
366
367         /*
368          * Map the Rx queues of each eth device (port) to an event queue.
369          * If there are at least as many event queues as eth ports, use a
370          * 1:1 mapping; otherwise map all eth ports to a single event queue.
371          */
372         if (nb_eth_dev > nb_eventqueue)
373                 single_ev_queue = true;
374
375         for (i = 0; i < nb_eth_dev; i++) {
376
377                 /* Use only the ports enabled */
378                 if ((em_conf->eth_portmask & (1 << i)) == 0)
379                         continue;
380
381                 /* Get the connection id */
382                 conn_id = adapter->nb_connections;
383
384                 /* Get the connection */
385                 conn = &(adapter->conn[conn_id]);
386
387                 /* Set mapping between eth ports & event queues */
388                 conn->ethdev_id = i;
389                 conn->eventq_id = single_ev_queue ? 0 : i;
390
391                 /* Add all Rx queues of the eth port to the event queue */
392                 conn->ethdev_rx_qid = -1;
393
394                 /* Get Rx adapter capabilities */
395                 ret = rte_event_eth_rx_adapter_caps_get(eventdev_id, i, &caps);
396                 if (ret < 0) {
397                         EH_LOG_ERR("Failed to get event device %d eth rx adapter"
398                                    " capabilities for port %d", eventdev_id, i);
399                         return ret;
400                 }
401                 if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
402                         rx_internal_port = false;
403
404                 /* Update the number of connections */
405                 adapter->nb_connections++;
406
407         }
408
409         if (rx_internal_port) {
410                 /* Rx core is not required */
411                 adapter->rx_core_id = -1;
412         } else {
413                 /* Rx core is required */
414                 adapter->rx_core_id = eh_get_next_eth_core(em_conf);
415         }
416
417         /* We have set up one adapter */
418         em_conf->nb_rx_adapter = 1;
419
420         return 0;
421 }
422
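/*
 * Build a default Tx adapter config: one adapter on the first event device
 * covering all Tx queues of every enabled eth port. A Tx service core and a
 * dedicated Tx event queue are assigned only when the internal port
 * capability is missing.
 */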
423 static int
424 eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
425 {
426         struct tx_adapter_connection_info *conn;
427         struct eventdev_params *eventdev_config;
428         struct tx_adapter_conf *tx_adapter;
429         bool tx_internal_port = true;
430         uint32_t caps = 0;
431         int eventdev_id;
432         int adapter_id;
433         int nb_eth_dev;
434         int conn_id;
435         int ret;
436         int i;
437
438         /*
439          * Create one Tx adapter with the Tx queues of all enabled eth
440          * ports added to it.
441          */
442
443         if (em_conf->nb_eventdev == 0) {
444                 EH_LOG_ERR("No event devs registered");
445                 return -EINVAL;
446         }
447
448         /* Get the number of eth devs */
449         nb_eth_dev = rte_eth_dev_count_avail();
450
451         /* Use the first event dev */
452         eventdev_config = &(em_conf->eventdev_config[0]);
453
454         /* Get eventdev ID */
455         eventdev_id = eventdev_config->eventdev_id;
456         adapter_id = 0;
457
458         /* Get adapter conf */
459         tx_adapter = &(em_conf->tx_adapter[adapter_id]);
460
461         /* Set adapter conf */
462         tx_adapter->eventdev_id = eventdev_id;
463         tx_adapter->adapter_id = adapter_id;
464
465         /*
466          * Set defaults for the connections: one eth device (port) is one
467          * connection, and all Tx queues of the device are mapped to the
468          * Tx adapter.
469          */
475
476         for (i = 0; i < nb_eth_dev; i++) {
477
478                 /* Use only the ports enabled */
479                 if ((em_conf->eth_portmask & (1 << i)) == 0)
480                         continue;
481
482                 /* Get the connection id */
483                 conn_id = tx_adapter->nb_connections;
484
485                 /* Get the connection */
486                 conn = &(tx_adapter->conn[conn_id]);
487
488                 /* Add ethdev to connections */
489                 conn->ethdev_id = i;
490
491                 /* Add all eth tx queues to adapter */
492                 conn->ethdev_tx_qid = -1;
493
494                 /* Get Tx adapter capabilities */
495                 ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);
496                 if (ret < 0) {
497                         EH_LOG_ERR("Failed to get event device %d eth tx adapter"
498                                    " capabilities for port %d", eventdev_id, i);
499                         return ret;
500                 }
501                 if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
502                         tx_internal_port = false;
503
504                 /* Update the number of connections */
505                 tx_adapter->nb_connections++;
506         }
507
508         if (tx_internal_port) {
509                 /* Tx core is not required */
510                 tx_adapter->tx_core_id = -1;
511         } else {
512                 /* Tx core is required */
513                 tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);
514
515                 /*
516                  * Use one event queue per adapter for submitting packets
517                  * for Tx. Reserve the last available queue.
518                  */
519                 /* Queue numbers start at 0 */
520                 tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;
521         }
522
523         /* We have set up one adapter */
524         em_conf->nb_tx_adapter = 1;
525         return 0;
526 }
527
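/*
 * Fill in defaults for any part of the eventmode config the user did not
 * provide: event devices, capability flags, queue-port links, Rx adapters
 * and Tx adapters.
 */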
528 static int
529 eh_validate_conf(struct eventmode_conf *em_conf)
530 {
531         int ret;
532
533         /*
534          * Check if event devs are specified. Else probe the event devices
535          * and initialize the config with all ports & queues available
536          */
537         if (em_conf->nb_eventdev == 0) {
538                 ret = eh_set_default_conf_eventdev(em_conf);
539                 if (ret != 0)
540                         return ret;
541         }
542
543         /* Perform capability check for the selected event devices */
544         eh_do_capability_check(em_conf);
545
546         /*
547          * Check if links are specified. Else generate a default config for
548          * the event ports used.
549          */
550         if (em_conf->nb_link == 0) {
551                 ret = eh_set_default_conf_link(em_conf);
552                 if (ret != 0)
553                         return ret;
554         }
555
556         /*
557          * Check if Rx adapters are specified. Else generate a default
558          * config with one Rx adapter and all eth Rx queues mapped to
559          * event queues.
560         if (em_conf->nb_rx_adapter == 0) {
561                 ret = eh_set_default_conf_rx_adapter(em_conf);
562                 if (ret != 0)
563                         return ret;
564         }
565
566         /*
567          * Check if tx adapters are specified. Else generate a default config
568          * with one tx adapter.
569          */
570         if (em_conf->nb_tx_adapter == 0) {
571                 ret = eh_set_default_conf_tx_adapter(em_conf);
572                 if (ret != 0)
573                         return ret;
574         }
575
576         return 0;
577 }
578
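/*
 * Configure and start the event devices: set up the event queues (the last
 * queue is made atomic when it serves as the Tx stage), set up the event
 * ports, and apply the queue-port links from the config.
 */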
579 static int
580 eh_initialize_eventdev(struct eventmode_conf *em_conf)
581 {
582         struct rte_event_queue_conf eventq_conf = {0};
583         struct rte_event_dev_info evdev_default_conf;
584         struct rte_event_dev_config eventdev_conf;
585         struct eventdev_params *eventdev_config;
586         int nb_eventdev = em_conf->nb_eventdev;
587         struct eh_event_link_info *link;
588         uint8_t *queue = NULL;
589         uint8_t eventdev_id;
590         int nb_eventqueue;
591         uint8_t i, j;
592         int ret;
593
594         for (i = 0; i < nb_eventdev; i++) {
595
596                 /* Get eventdev config */
597                 eventdev_config = &(em_conf->eventdev_config[i]);
598
599                 /* Get event dev ID */
600                 eventdev_id = eventdev_config->eventdev_id;
601
602                 /* Get the number of queues */
603                 nb_eventqueue = eventdev_config->nb_eventqueue;
604
605                 /* Reset the default conf */
606                 memset(&evdev_default_conf, 0,
607                         sizeof(struct rte_event_dev_info));
608
609                 /* Get default conf of eventdev */
610                 ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
611                 if (ret < 0) {
612                         EH_LOG_ERR(
613                                 "Error in getting event device info[devID:%d]",
614                                 eventdev_id);
615                         return ret;
616                 }
617
618                 memset(&eventdev_conf, 0, sizeof(struct rte_event_dev_config));
619                 eventdev_conf.nb_events_limit =
620                                 evdev_default_conf.max_num_events;
621                 eventdev_conf.nb_event_queues = nb_eventqueue;
622                 eventdev_conf.nb_event_ports =
623                                 eventdev_config->nb_eventport;
624                 eventdev_conf.nb_event_queue_flows =
625                                 evdev_default_conf.max_event_queue_flows;
626                 eventdev_conf.nb_event_port_dequeue_depth =
627                                 evdev_default_conf.max_event_port_dequeue_depth;
628                 eventdev_conf.nb_event_port_enqueue_depth =
629                                 evdev_default_conf.max_event_port_enqueue_depth;
630
631                 /* Configure event device */
632                 ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
633                 if (ret < 0) {
634                         EH_LOG_ERR("Error in configuring event device");
635                         return ret;
636                 }
637
638                 /* Configure event queues */
639                 for (j = 0; j < nb_eventqueue; j++) {
640
641                         memset(&eventq_conf, 0,
642                                         sizeof(struct rte_event_queue_conf));
643
644                         /* Per event dev queues can be ATQ or SINGLE LINK */
645                         eventq_conf.event_queue_cfg =
646                                         eventdev_config->ev_queue_mode;
647                         /*
648                          * All queues use the configured sched_type as the
649                          * schedule type for the application stage. One
650                          * queue is reserved for the final eth Tx stage if
651                          * the event device does not have internal ports;
652                          * that queue is configured as atomic.
653                          */
654                         if (!eventdev_config->all_internal_ports &&
655                             j == nb_eventqueue-1) {
656                                 eventq_conf.schedule_type =
657                                         RTE_SCHED_TYPE_ATOMIC;
658                         } else {
659                                 eventq_conf.schedule_type =
660                                         em_conf->ext_params.sched_type;
661                         }
662
663                         /* Set max atomic flows to 1024 */
664                         eventq_conf.nb_atomic_flows = 1024;
665                         eventq_conf.nb_atomic_order_sequences = 1024;
666
667                         /* Setup the queue */
668                         ret = rte_event_queue_setup(eventdev_id, j,
669                                         &eventq_conf);
670                         if (ret < 0) {
671                                 EH_LOG_ERR("Failed to setup event queue %d",
672                                            ret);
673                                 return ret;
674                         }
675                 }
676
677                 /* Configure event ports */
678                 for (j = 0; j <  eventdev_config->nb_eventport; j++) {
679                         ret = rte_event_port_setup(eventdev_id, j, NULL);
680                         if (ret < 0) {
681                                 EH_LOG_ERR("Failed to setup event port %d",
682                                            ret);
683                                 return ret;
684                         }
685                 }
686         }
687
688         /* Make event queue - event port link */
689         for (j = 0; j <  em_conf->nb_link; j++) {
690
691                 /* Get link info */
692                 link = &(em_conf->link[j]);
693
694                 /* Get event dev ID */
695                 eventdev_id = link->eventdev_id;
696
697                 /*
698                  * If "all_ev_queue_to_ev_port" params flag is selected, all
699                  * queues need to be mapped to the port.
700                  */
701                 if (em_conf->ext_params.all_ev_queue_to_ev_port)
702                         queue = NULL;
703                 else
704                         queue = &(link->eventq_id);
705
706                 /* Link queue to port */
707                 ret = rte_event_port_link(eventdev_id, link->event_port_id,
708                                 queue, NULL, 1);
709                 if (ret < 0) {
710                         EH_LOG_ERR("Failed to link event port %d", ret);
711                         return ret;
712                 }
713         }
714
715         /* Start event devices */
716         for (i = 0; i < nb_eventdev; i++) {
717
718                 /* Get eventdev config */
719                 eventdev_config = &(em_conf->eventdev_config[i]);
720
721                 ret = rte_event_dev_start(eventdev_config->eventdev_id);
722                 if (ret < 0) {
723                         EH_LOG_ERR("Failed to start event device %d, %d",
724                                    i, ret);
725                         return ret;
726                 }
727         }
728         return 0;
729 }
730
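/*
 * Create one Rx adapter, add all its configured eth port/queue connections
 * and start it. The adapter's service (if any) is run explicitly by an eth
 * core, so the mapped-lcore runstate check is disabled.
 */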
731 static int
732 eh_rx_adapter_configure(struct eventmode_conf *em_conf,
733                 struct rx_adapter_conf *adapter)
734 {
735         struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
736         struct rte_event_dev_info evdev_default_conf = {0};
737         struct rte_event_port_conf port_conf = {0};
738         struct rx_adapter_connection_info *conn;
739         uint8_t eventdev_id;
740         uint32_t service_id;
741         int ret;
742         int j;
743
744         /* Get event dev ID */
745         eventdev_id = adapter->eventdev_id;
746
747         /* Get default configuration of event dev */
748         ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
749         if (ret < 0) {
750                 EH_LOG_ERR("Failed to get event dev info %d", ret);
751                 return ret;
752         }
753
754         /* Setup port conf */
755         port_conf.new_event_threshold = 1200;
756         port_conf.dequeue_depth =
757                         evdev_default_conf.max_event_port_dequeue_depth;
758         port_conf.enqueue_depth =
759                         evdev_default_conf.max_event_port_enqueue_depth;
760
761         /* Create Rx adapter */
762         ret = rte_event_eth_rx_adapter_create(adapter->adapter_id,
763                         adapter->eventdev_id, &port_conf);
764         if (ret < 0) {
765                 EH_LOG_ERR("Failed to create rx adapter %d", ret);
766                 return ret;
767         }
768
769         /* Setup various connections in the adapter */
770         for (j = 0; j < adapter->nb_connections; j++) {
771                 /* Get connection */
772                 conn = &(adapter->conn[j]);
773
774                 /* Setup queue conf */
775                 queue_conf.ev.queue_id = conn->eventq_id;
776                 queue_conf.ev.sched_type = em_conf->ext_params.sched_type;
777                 queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;
778
779                 /* Add queue to the adapter */
780                 ret = rte_event_eth_rx_adapter_queue_add(adapter->adapter_id,
781                                 conn->ethdev_id, conn->ethdev_rx_qid,
782                                 &queue_conf);
783                 if (ret < 0) {
784                         EH_LOG_ERR("Failed to add eth queue to rx adapter %d",
785                                    ret);
786                         return ret;
787                 }
788         }
789
790         /* Get the service ID used by rx adapter */
791         ret = rte_event_eth_rx_adapter_service_id_get(adapter->adapter_id,
792                                                       &service_id);
793         if (ret != -ESRCH && ret < 0) {
794                 EH_LOG_ERR("Failed to get service id used by rx adapter %d",
795                            ret);
796                 return ret;
797         }
798         if (ret == 0) /* -ESRCH: adapter has internal port, no service */
799                 rte_service_set_runstate_mapped_check(service_id, 0);
800
801         /* Start adapter */
802         ret = rte_event_eth_rx_adapter_start(adapter->adapter_id);
803         if (ret < 0) {
804                 EH_LOG_ERR("Failed to start rx adapter %d", ret);
805                 return ret;
806         }
807
808         return 0;
809 }
810
811 static int
812 eh_initialize_rx_adapter(struct eventmode_conf *em_conf)
813 {
814         struct rx_adapter_conf *adapter;
815         int i, ret;
816
817         /* Configure rx adapters */
818         for (i = 0; i < em_conf->nb_rx_adapter; i++) {
819                 adapter = &(em_conf->rx_adapter[i]);
820                 ret = eh_rx_adapter_configure(em_conf, adapter);
821                 if (ret < 0) {
822                         EH_LOG_ERR("Failed to configure rx adapter %d", ret);
823                         return ret;
824                 }
825         }
826         return 0;
827 }
828
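/*
 * Main loop for an eth (service) core: collect the service IDs of all Rx and
 * Tx adapters assigned to this lcore and keep running them until
 * eh_stop_worker_eth_core() clears eth_core_running.
 */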
829 static int32_t
830 eh_start_worker_eth_core(struct eventmode_conf *conf, uint32_t lcore_id)
831 {
832         uint32_t service_id[EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE];
833         struct rx_adapter_conf *rx_adapter;
834         struct tx_adapter_conf *tx_adapter;
835         int service_count = 0;
836         int adapter_id;
837         int32_t ret;
838         int i;
839
840         EH_LOG_INFO("Entering eth_core processing on lcore %u", lcore_id);
841
842         /*
843          * Parse adapter config to check which of all Rx adapters need
844          * to be handled by this core.
845          */
846         for (i = 0; i < conf->nb_rx_adapter; i++) {
847                 /* Stop if we have reached the max allowed adapters */
848                 if (service_count >= EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE) {
849                         EH_LOG_ERR(
850                               "Exceeded the max allowed adapters per rx core");
851                         break;
852                 }
853
854                 rx_adapter = &(conf->rx_adapter[i]);
855                 if (rx_adapter->rx_core_id != lcore_id)
856                         continue;
857
858                 /* Adapter is handled by this core */
859                 adapter_id = rx_adapter->adapter_id;
860
861                 /* Get the service ID for the adapters */
862                 ret = rte_event_eth_rx_adapter_service_id_get(adapter_id,
863                                 &(service_id[service_count]));
864
865                 if (ret != -ESRCH && ret < 0) {
866                         EH_LOG_ERR(
867                                 "Failed to get service id used by rx adapter");
868                         return ret;
869                 }
870
871                 /* Update service count */
872                 service_count++;
873         }
874
875         /*
876          * Parse adapter config to see which of all Tx adapters need
877          * to be handled by this core.
878          */
879         for (i = 0; i < conf->nb_tx_adapter; i++) {
880                 /* Stop if we have reached the max allowed adapters */
881                 if (service_count >= EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE) {
882                         EH_LOG_ERR(
883                                 "Exceeded the max allowed adapters per tx core");
884                         break;
885                 }
886
887                 tx_adapter = &conf->tx_adapter[i];
888                 if (tx_adapter->tx_core_id != lcore_id)
889                         continue;
890
891                 /* Adapter is handled by this core */
892                 adapter_id = tx_adapter->adapter_id;
893
894                 /* Get the service ID for the adapters */
895                 ret = rte_event_eth_tx_adapter_service_id_get(adapter_id,
896                                 &(service_id[service_count]));
897
898                 if (ret != -ESRCH && ret < 0) {
899                         EH_LOG_ERR(
900                                 "Failed to get service id used by tx adapter");
901                         return ret;
902                 }
903
904                 /* Update service count */
905                 service_count++;
906         }
907
908         eth_core_running = true;
909
910         while (eth_core_running) {
911                 for (i = 0; i < service_count; i++) {
912                         /* Initiate adapter service */
913                         rte_service_run_iter_on_app_lcore(service_id[i], 0);
914                 }
915         }
916
917         return 0;
918 }
919
920 static int32_t
921 eh_stop_worker_eth_core(void)
922 {
923         if (eth_core_running) {
924                 EH_LOG_INFO("Stopping eth cores");
925                 eth_core_running = false;
926         }
927         return 0;
928 }
929
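/*
 * Match a worker thread for this lcore: derive the capabilities of the event
 * device linked to the lcore (Tx internal port, burst mode, IPsec mode) and
 * return the first registered application worker with the same capabilities.
 */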
930 static struct eh_app_worker_params *
931 eh_find_worker(uint32_t lcore_id, struct eh_conf *conf,
932                 struct eh_app_worker_params *app_wrkrs, uint8_t nb_wrkr_param)
933 {
934         struct eh_app_worker_params curr_conf = { {{0} }, NULL};
935         struct eh_event_link_info *link = NULL;
936         struct eh_app_worker_params *tmp_wrkr;
937         struct eventmode_conf *em_conf;
938         uint8_t eventdev_id;
939         int i;
940
941         /* Get eventmode config */
942         em_conf = conf->mode_params;
943
944         /*
945          * Use event device from the first lcore-event link.
946          *
947          * Assumption: All lcore-event links tied to a core are using the
948          * same event device. In other words, one core would be polling on
949          * queues of a single event device only.
950          */
951
952         /* Get a link for this lcore; leave link as NULL if none exists */
953         for (i = 0; i < em_conf->nb_link; i++)
954                 if (em_conf->link[i].lcore_id == lcore_id) {
955                         link = &(em_conf->link[i]);
956                         break;
957                 }
958
959         if (link == NULL) {
960                 EH_LOG_ERR("No valid link found for lcore %d", lcore_id);
961                 return NULL;
962         }
963
964         /* Get event dev ID */
965         eventdev_id = link->eventdev_id;
966
967         /* Populate the curr_conf with the capabilities */
968
969         /* Check for Tx internal port */
970         if (eh_dev_has_tx_internal_port(eventdev_id))
971                 curr_conf.cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
972         else
973                 curr_conf.cap.tx_internal_port = EH_TX_TYPE_NO_INTERNAL_PORT;
974
975         /* Check for burst mode */
976         if (eh_dev_has_burst_mode(eventdev_id))
977                 curr_conf.cap.burst = EH_RX_TYPE_BURST;
978         else
979                 curr_conf.cap.burst = EH_RX_TYPE_NON_BURST;
980
981         curr_conf.cap.ipsec_mode = conf->ipsec_mode;
982
983         /* Parse the passed list and see if we have matching capabilities */
984
985         /* Initialize the pointer used to traverse the list */
986         tmp_wrkr = app_wrkrs;
987
988         for (i = 0; i < nb_wrkr_param; i++, tmp_wrkr++) {
989
990                 /* Skip this if capabilities are not matching */
991                 if (tmp_wrkr->cap.u64 != curr_conf.cap.u64)
992                         continue;
993
994                 /* If the checks pass, we have a match */
995                 return tmp_wrkr;
996         }
997
998         return NULL;
999 }
1000
1001 static int
1002 eh_verify_match_worker(struct eh_app_worker_params *match_wrkr)
1003 {
1004         /* Verify registered worker */
1005         if (match_wrkr->worker_thread == NULL) {
1006                 EH_LOG_ERR("No worker registered");
1007                 return 0;
1008         }
1009
1010         /* Success */
1011         return 1;
1012 }
1013
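/*
 * Copy all queue-port links configured for the given lcore into a freshly
 * allocated array (returned via *links) and return the number of links
 * copied.
 */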
1014 static uint8_t
1015 eh_get_event_lcore_links(uint32_t lcore_id, struct eh_conf *conf,
1016                 struct eh_event_link_info **links)
1017 {
1018         struct eh_event_link_info *link_cache;
1019         struct eventmode_conf *em_conf = NULL;
1020         struct eh_event_link_info *link;
1021         uint8_t lcore_nb_link = 0;
1022         size_t single_link_size;
1023         size_t cache_size;
1024         int index = 0;
1025         int i;
1026
1027         if (conf == NULL || links == NULL) {
1028                 EH_LOG_ERR("Invalid args");
1029                 return -EINVAL;
1030         }
1031
1032         /* Get eventmode conf */
1033         em_conf = conf->mode_params;
1034
1035         if (em_conf == NULL) {
1036                 EH_LOG_ERR("Invalid event mode parameters");
1037                 return -EINVAL;
1038         }
1039
1040         /* Get the number of links registered */
1041         for (i = 0; i < em_conf->nb_link; i++) {
1042
1043                 /* Get link */
1044                 link = &(em_conf->link[i]);
1045
1046                 /* Check if we have link intended for this lcore */
1047                 if (link->lcore_id == lcore_id) {
1048
1049                         /* Update the number of links for this core */
1050                         lcore_nb_link++;
1051
1052                 }
1053         }
1054
1055         /* Compute size of one entry to be copied */
1056         single_link_size = sizeof(struct eh_event_link_info);
1057
1058         /* Compute size of the buffer required */
1059         cache_size = lcore_nb_link * sizeof(struct eh_event_link_info);
1060
1061         /* Allocate memory for caching the links */
1062         link_cache = calloc(1, cache_size);
1063
1064         /* Cache the links configured for this lcore */
1065         for (i = 0; i < em_conf->nb_link; i++) {
1066
1067                 /* Get link */
1068                 link = &(em_conf->link[i]);
1069
1070                 /* Check if we have link intended for this lcore */
1071                 if (link->lcore_id == lcore_id) {
1072
1073                         /* Cache the link */
1074                         memcpy(&link_cache[index], link, single_link_size);
1075
1076                         /* Update index */
1077                         index++;
1078                 }
1079         }
1080
1081         /* Update the links for application to use the cached links */
1082         *links = link_cache;
1083
1084         /* Return the number of cached links */
1085         return lcore_nb_link;
1086 }
1087
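/*
 * Create one Tx adapter and add its eth port/queue connections. When no Tx
 * service core is assigned the device has an internal Tx port and nothing
 * more is needed; otherwise link the reserved Tx event queue exclusively to
 * the adapter's event port before starting the adapter.
 */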
1088 static int
1089 eh_tx_adapter_configure(struct eventmode_conf *em_conf,
1090                 struct tx_adapter_conf *adapter)
1091 {
1092         struct rte_event_dev_info evdev_default_conf = {0};
1093         struct rte_event_port_conf port_conf = {0};
1094         struct tx_adapter_connection_info *conn;
1095         struct eventdev_params *eventdev_config;
1096         uint8_t tx_port_id = 0;
1097         uint8_t eventdev_id;
1098         uint32_t service_id;
1099         int ret, j;
1100
1101         /* Get event dev ID */
1102         eventdev_id = adapter->eventdev_id;
1103
1104         /* Get event device conf */
1105         eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);
1106
1107         /* Create Tx adapter */
1108
1109         /* Get default configuration of event dev */
1110         ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
1111         if (ret < 0) {
1112                 EH_LOG_ERR("Failed to get event dev info %d", ret);
1113                 return ret;
1114         }
1115
1116         /* Setup port conf */
1117         port_conf.new_event_threshold =
1118                         evdev_default_conf.max_num_events;
1119         port_conf.dequeue_depth =
1120                         evdev_default_conf.max_event_port_dequeue_depth;
1121         port_conf.enqueue_depth =
1122                         evdev_default_conf.max_event_port_enqueue_depth;
1123
1124         /* Create adapter */
1125         ret = rte_event_eth_tx_adapter_create(adapter->adapter_id,
1126                         adapter->eventdev_id, &port_conf);
1127         if (ret < 0) {
1128                 EH_LOG_ERR("Failed to create tx adapter %d", ret);
1129                 return ret;
1130         }
1131
1132         /* Setup various connections in the adapter */
1133         for (j = 0; j < adapter->nb_connections; j++) {
1134
1135                 /* Get connection */
1136                 conn = &(adapter->conn[j]);
1137
1138                 /* Add queue to the adapter */
1139                 ret = rte_event_eth_tx_adapter_queue_add(adapter->adapter_id,
1140                                 conn->ethdev_id, conn->ethdev_tx_qid);
1141                 if (ret < 0) {
1142                         EH_LOG_ERR("Failed to add eth queue to tx adapter %d",
1143                                    ret);
1144                         return ret;
1145                 }
1146         }
1147
1148         /*
1149          * Check if a Tx core is assigned. If no Tx core is assigned, the
1150          * adapter has an internal port for submitting Tx packets and the
1151          * Tx event queue & port setup is not required.
1152          */
1153         if (adapter->tx_core_id == (uint32_t) (-1)) {
1154                 /* Internal port is present */
1155                 goto skip_tx_queue_port_setup;
1156         }
1157
1158         /* Setup Tx queue & port */
1159
1160         /* Get event port used by the adapter */
1161         ret = rte_event_eth_tx_adapter_event_port_get(
1162                         adapter->adapter_id, &tx_port_id);
1163         if (ret) {
1164                 EH_LOG_ERR("Failed to get tx adapter port id %d", ret);
1165                 return ret;
1166         }
1167
1168         /*
1169          * The Tx event queue is reserved for the Tx adapter. Unlink this
1170          * queue from all other event ports so that only the adapter's
1171          * port receives events from it.
1172          */
1173         for (j = 0; j < eventdev_config->nb_eventport; j++) {
1174                 rte_event_port_unlink(eventdev_id, j,
1175                                       &(adapter->tx_ev_queue), 1);
1176         }
1177
1178         /* Link Tx event queue to Tx port */
1179         ret = rte_event_port_link(eventdev_id, tx_port_id,
1180                         &(adapter->tx_ev_queue), NULL, 1);
1181         if (ret != 1) {
1182                 EH_LOG_ERR("Failed to link event queue to port");
1183                 return ret;
1184         }
1185
1186         /* Get the service ID used by Tx adapter */
1187         ret = rte_event_eth_tx_adapter_service_id_get(adapter->adapter_id,
1188                                                       &service_id);
1189         if (ret != -ESRCH && ret < 0) {
1190                 EH_LOG_ERR("Failed to get service id used by tx adapter %d",
1191                            ret);
1192                 return ret;
1193         }
1194         if (ret == 0) /* -ESRCH: adapter has internal port, no service */
1195                 rte_service_set_runstate_mapped_check(service_id, 0);
1196
1197 skip_tx_queue_port_setup:
1198         /* Start adapter */
1199         ret = rte_event_eth_tx_adapter_start(adapter->adapter_id);
1200         if (ret < 0) {
1201                 EH_LOG_ERR("Failed to start tx adapter %d", ret);
1202                 return ret;
1203         }
1204
1205         return 0;
1206 }
1207
1208 static int
1209 eh_initialize_tx_adapter(struct eventmode_conf *em_conf)
1210 {
1211         struct tx_adapter_conf *adapter;
1212         int i, ret;
1213
1214         /* Configure Tx adapters */
1215         for (i = 0; i < em_conf->nb_tx_adapter; i++) {
1216                 adapter = &(em_conf->tx_adapter[i]);
1217                 ret = eh_tx_adapter_configure(em_conf, adapter);
1218                 if (ret < 0) {
1219                         EH_LOG_ERR("Failed to configure tx adapter %d", ret);
1220                         return ret;
1221                 }
1222         }
1223         return 0;
1224 }
1225
1226 static void
1227 eh_display_operating_mode(struct eventmode_conf *em_conf)
1228 {
1229         char sched_types[][32] = {
1230                 "RTE_SCHED_TYPE_ORDERED",
1231                 "RTE_SCHED_TYPE_ATOMIC",
1232                 "RTE_SCHED_TYPE_PARALLEL",
1233         };
1234         EH_LOG_INFO("Operating mode:");
1235
1236         EH_LOG_INFO("\tScheduling type: \t%s",
1237                 sched_types[em_conf->ext_params.sched_type]);
1238
1239         EH_LOG_INFO("");
1240 }
1241
1242 static void
1243 eh_display_event_dev_conf(struct eventmode_conf *em_conf)
1244 {
1245         char queue_mode[][32] = {
1246                 "",
1247                 "ATQ (ALL TYPE QUEUE)",
1248                 "SINGLE LINK",
1249         };
1250         char print_buf[256] = { 0 };
1251         int i;
1252
1253         EH_LOG_INFO("Event Device Configuration:");
1254
1255         for (i = 0; i < em_conf->nb_eventdev; i++) {
1256                 sprintf(print_buf,
1257                         "\tDev ID: %-2d \tQueues: %-2d \tPorts: %-2d",
1258                         em_conf->eventdev_config[i].eventdev_id,
1259                         em_conf->eventdev_config[i].nb_eventqueue,
1260                         em_conf->eventdev_config[i].nb_eventport);
1261                 sprintf(print_buf + strlen(print_buf),
1262                         "\tQueue mode: %s",
1263                         queue_mode[em_conf->eventdev_config[i].ev_queue_mode]);
1264                 EH_LOG_INFO("%s", print_buf);
1265         }
1266         EH_LOG_INFO("");
1267 }
1268
1269 static void
1270 eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)
1271 {
1272         int nb_rx_adapter = em_conf->nb_rx_adapter;
1273         struct rx_adapter_connection_info *conn;
1274         struct rx_adapter_conf *adapter;
1275         char print_buf[256] = { 0 };
1276         int i, j;
1277
1278         EH_LOG_INFO("Rx adapters configured: %d", nb_rx_adapter);
1279
1280         for (i = 0; i < nb_rx_adapter; i++) {
1281                 adapter = &(em_conf->rx_adapter[i]);
1282                 sprintf(print_buf,
1283                         "\tRx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
1284                         adapter->adapter_id,
1285                         adapter->nb_connections,
1286                         adapter->eventdev_id);
1287                 if (adapter->rx_core_id == (uint32_t)-1)
1288                         sprintf(print_buf + strlen(print_buf),
1289                                 "\tRx core: %-2s", "[INTERNAL PORT]");
1290                 else if (adapter->rx_core_id == RTE_MAX_LCORE)
1291                         sprintf(print_buf + strlen(print_buf),
1292                                 "\tRx core: %-2s", "[NONE]");
1293                 else
1294                         sprintf(print_buf + strlen(print_buf),
1295                                 "\tRx core: %-2d", adapter->rx_core_id);
1296
1297                 EH_LOG_INFO("%s", print_buf);
1298
1299                 for (j = 0; j < adapter->nb_connections; j++) {
1300                         conn = &(adapter->conn[j]);
1301
1302                         sprintf(print_buf,
1303                                 "\t\tEthdev ID: %-2d", conn->ethdev_id);
1304
1305                         if (conn->ethdev_rx_qid == -1)
1306                                 sprintf(print_buf + strlen(print_buf),
1307                                         "\tEth rx queue: %-2s", "ALL");
1308                         else
1309                                 sprintf(print_buf + strlen(print_buf),
1310                                         "\tEth rx queue: %-2d",
1311                                         conn->ethdev_rx_qid);
1312
1313                         sprintf(print_buf + strlen(print_buf),
1314                                 "\tEvent queue: %-2d", conn->eventq_id);
1315                         EH_LOG_INFO("%s", print_buf);
1316                 }
1317         }
1318         EH_LOG_INFO("");
1319 }
1320
1321 static void
1322 eh_display_tx_adapter_conf(struct eventmode_conf *em_conf)
1323 {
1324         int nb_tx_adapter = em_conf->nb_tx_adapter;
1325         struct tx_adapter_connection_info *conn;
1326         struct tx_adapter_conf *adapter;
1327         char print_buf[256] = { 0 };
1328         int i, j;
1329
1330         EH_LOG_INFO("Tx adapters configured: %d", nb_tx_adapter);
1331
1332         for (i = 0; i < nb_tx_adapter; i++) {
1333                 adapter = &(em_conf->tx_adapter[i]);
1334                 sprintf(print_buf,
1335                         "\tTx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
1336                         adapter->adapter_id,
1337                         adapter->nb_connections,
1338                         adapter->eventdev_id);
1339                 if (adapter->tx_core_id == (uint32_t)-1)
1340                         sprintf(print_buf + strlen(print_buf),
1341                                 "\tTx core: %-2s", "[INTERNAL PORT]");
1342                 else if (adapter->tx_core_id == RTE_MAX_LCORE)
1343                         sprintf(print_buf + strlen(print_buf),
1344                                 "\tTx core: %-2s", "[NONE]");
1345                 else
1346                         sprintf(print_buf + strlen(print_buf),
1347                                 "\tTx core: %-2d,\tInput event queue: %-2d",
1348                                 adapter->tx_core_id, adapter->tx_ev_queue);
1349
1350                 EH_LOG_INFO("%s", print_buf);
1351
1352                 for (j = 0; j < adapter->nb_connections; j++) {
1353                         conn = &(adapter->conn[j]);
1354
1355                         sprintf(print_buf,
1356                                 "\t\tEthdev ID: %-2d", conn->ethdev_id);
1357
1358                         if (conn->ethdev_tx_qid == -1)
1359                                 sprintf(print_buf + strlen(print_buf),
1360                                         "\tEth tx queue: %-2s", "ALL");
1361                         else
1362                                 sprintf(print_buf + strlen(print_buf),
1363                                         "\tEth tx queue: %-2d",
1364                                         conn->ethdev_tx_qid);
1365                         EH_LOG_INFO("%s", print_buf);
1366                 }
1367         }
1368         EH_LOG_INFO("");
1369 }
1370
1371 static void
1372 eh_display_link_conf(struct eventmode_conf *em_conf)
1373 {
1374         struct eh_event_link_info *link;
1375         char print_buf[256] = { 0 };
1376         int i;
1377
1378         EH_LOG_INFO("Links configured: %d", em_conf->nb_link);
1379
1380         for (i = 0; i < em_conf->nb_link; i++) {
1381                 link = &(em_conf->link[i]);
1382
1383                 sprintf(print_buf,
1384                         "\tEvent dev ID: %-2d\tEvent port: %-2d",
1385                         link->eventdev_id,
1386                         link->event_port_id);
1387
1388                 if (em_conf->ext_params.all_ev_queue_to_ev_port)
1389                         sprintf(print_buf + strlen(print_buf),
1390                                 "Event queue: %-2s\t", "ALL");
1391                 else
1392                         sprintf(print_buf + strlen(print_buf),
1393                                 "Event queue: %-2d\t", link->eventq_id);
1394
1395                 sprintf(print_buf + strlen(print_buf),
1396                         "Lcore: %-2d", link->lcore_id);
1397                 EH_LOG_INFO("%s", print_buf);
1398         }
1399         EH_LOG_INFO("");
1400 }
1401
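/*
 * Allocate the helper config with defaults: poll mode, app IPsec mode, all
 * eth ports enabled, scheduling type not set, and two lcores other than the
 * master core pre-marked as eth cores for Rx & Tx.
 *
 * Typical call sequence (a sketch; the exact flow in ipsec-secgw may differ):
 *
 *	struct eh_conf *conf = eh_conf_init();
 *	conf->mode = EH_PKT_TRANSFER_MODE_EVENT;
 *	eh_devs_init(conf);
 *	...
 *	eh_conf_uninit(conf);
 */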
1402 struct eh_conf *
1403 eh_conf_init(void)
1404 {
1405         struct eventmode_conf *em_conf = NULL;
1406         struct eh_conf *conf = NULL;
1407         unsigned int eth_core_id;
1408         void *bitmap = NULL;
1409         uint32_t nb_bytes;
1410
1411         /* Allocate memory for config */
1412         conf = calloc(1, sizeof(struct eh_conf));
1413         if (conf == NULL) {
1414                 EH_LOG_ERR("Failed to allocate memory for eventmode helper "
1415                            "config");
1416                 return NULL;
1417         }
1418
1419         /* Set default conf */
1420
1421         /* Packet transfer mode: poll */
1422         conf->mode = EH_PKT_TRANSFER_MODE_POLL;
1423         conf->ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
1424
1425         /* Keep all ethernet ports enabled by default */
1426         conf->eth_portmask = -1;
1427
1428         /* Allocate memory for event mode params */
1429         conf->mode_params = calloc(1, sizeof(struct eventmode_conf));
1430         if (conf->mode_params == NULL) {
1431                 EH_LOG_ERR("Failed to allocate memory for event mode params");
1432                 goto free_conf;
1433         }
1434
1435         /* Get eventmode conf */
1436         em_conf = conf->mode_params;
1437
1438         /* Allocate and initialize bitmap for eth cores */
1439         nb_bytes = rte_bitmap_get_memory_footprint(RTE_MAX_LCORE);
1440         if (!nb_bytes) {
1441                 EH_LOG_ERR("Failed to get bitmap footprint");
1442                 goto free_em_conf;
1443         }
1444
1445         bitmap = rte_zmalloc("event-helper-ethcore-bitmap", nb_bytes,
1446                              RTE_CACHE_LINE_SIZE);
1447         if (!bitmap) {
1448                 EH_LOG_ERR("Failed to allocate memory for eth cores bitmap");
1449                 goto free_em_conf;
1450         }
1451
1452         em_conf->eth_core_mask = rte_bitmap_init(RTE_MAX_LCORE, bitmap,
1453                                                  nb_bytes);
1454         if (!em_conf->eth_core_mask) {
1455                 EH_LOG_ERR("Failed to initialize bitmap");
1456                 goto free_bitmap;
1457         }
1458
1459         /* Set schedule type as not set */
1460         em_conf->ext_params.sched_type = SCHED_TYPE_NOT_SET;
1461
1462         /* Set two cores as eth cores for Rx & Tx */
1463
1464         /* Use first core other than master core as Rx core */
1465         eth_core_id = rte_get_next_lcore(0,     /* curr core */
1466                                          1,     /* skip master core */
1467                                          0      /* wrap */);
1468
1469         rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);
1470
1471         /* Use next core as Tx core */
1472         eth_core_id = rte_get_next_lcore(eth_core_id,   /* curr core */
1473                                          1,             /* skip master core */
1474                                          0              /* wrap */);
1475
1476         rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);
1477
1478         return conf;
1479
1480 free_bitmap:
1481         rte_free(bitmap);
1482 free_em_conf:
1483         free(em_conf);
1484 free_conf:
1485         free(conf);
1486         return NULL;
1487 }
1488
1489 void
1490 eh_conf_uninit(struct eh_conf *conf)
1491 {
1492         struct eventmode_conf *em_conf = NULL;
1493
1494         if (!conf || !conf->mode_params)
1495                 return;
1496
1497         /* Get eventmode conf */
1498         em_conf = conf->mode_params;
1499
1500         /* Free eventmode configuration memory */
1501         rte_free(em_conf->eth_core_mask);
1502         free(em_conf);
1503         free(conf);
1504 }
1505
1506 void
1507 eh_display_conf(struct eh_conf *conf)
1508 {
1509         struct eventmode_conf *em_conf;
1510
1511         if (conf == NULL) {
1512                 EH_LOG_ERR("Invalid event helper configuration");
1513                 return;
1514         }
1515
1516         if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
1517                 return;
1518
1519         if (conf->mode_params == NULL) {
1520                 EH_LOG_ERR("Invalid event mode parameters");
1521                 return;
1522         }
1523
1524         /* Get eventmode conf */
1525         em_conf = (struct eventmode_conf *)(conf->mode_params);
1526
1527         /* Display user exposed operating modes */
1528         eh_display_operating_mode(em_conf);
1529
1530         /* Display event device conf */
1531         eh_display_event_dev_conf(em_conf);
1532
1533         /* Display Rx adapter conf */
1534         eh_display_rx_adapter_conf(em_conf);
1535
1536         /* Display Tx adapter conf */
1537         eh_display_tx_adapter_conf(em_conf);
1538
1539         /* Display event-lcore link */
1540         eh_display_link_conf(em_conf);
1541 }
1542
1543 int32_t
1544 eh_devs_init(struct eh_conf *conf)
1545 {
1546         struct eventmode_conf *em_conf;
1547         uint16_t port_id;
1548         int ret;
1549
1550         if (conf == NULL) {
1551                 EH_LOG_ERR("Invalid event helper configuration");
1552                 return -EINVAL;
1553         }
1554
1555         if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
1556                 return 0;
1557
1558         if (conf->mode_params == NULL) {
1559                 EH_LOG_ERR("Invalid event mode parameters");
1560                 return -EINVAL;
1561         }
1562
1563         /* Get eventmode conf */
1564         em_conf = conf->mode_params;
1565
1566         /* Propagate the eth portmask to the eventmode conf */
1567         em_conf->eth_portmask = conf->eth_portmask;
1568
1569         /* Validate the requested config */
1570         ret = eh_validate_conf(em_conf);
1571         if (ret < 0) {
1572                 EH_LOG_ERR("Failed to validate the requested config %d", ret);
1573                 return ret;
1574         }
1575
1576         /* Display the current configuration */
1577         eh_display_conf(conf);
1578
1579         /* Stop eth devices before setting up adapter */
1580         RTE_ETH_FOREACH_DEV(port_id) {
1581
1582                 /* Use only the ports enabled */
1583                 if ((conf->eth_portmask & (1 << port_id)) == 0)
1584                         continue;
1585
1586                 ret = rte_eth_dev_stop(port_id);
1587                 if (ret != 0) {
1588                         EH_LOG_ERR("Failed to stop port %u, err: %d",
1589                                         port_id, ret);
1590                         return ret;
1591                 }
1592         }
1593
1594         /* Setup eventdev */
1595         ret = eh_initialize_eventdev(em_conf);
1596         if (ret < 0) {
1597                 EH_LOG_ERR("Failed to initialize event dev %d", ret);
1598                 return ret;
1599         }
1600
1601         /* Setup Rx adapter */
1602         ret = eh_initialize_rx_adapter(em_conf);
1603         if (ret < 0) {
1604                 EH_LOG_ERR("Failed to initialize rx adapter %d", ret);
1605                 return ret;
1606         }
1607
1608         /* Setup Tx adapter */
1609         ret = eh_initialize_tx_adapter(em_conf);
1610         if (ret < 0) {
1611                 EH_LOG_ERR("Failed to initialize tx adapter %d", ret);
1612                 return ret;
1613         }
1614
1615         /* Start eth devices after setting up adapter */
1616         RTE_ETH_FOREACH_DEV(port_id) {
1617
1618                 /* Use only the ports enabled */
1619                 if ((conf->eth_portmask & (1 << port_id)) == 0)
1620                         continue;
1621
1622                 ret = rte_eth_dev_start(port_id);
1623                 if (ret < 0) {
1624                         EH_LOG_ERR("Failed to start eth dev %d, %d",
1625                                    port_id, ret);
1626                         return ret;
1627                 }
1628         }
1629
1630         return 0;
1631 }
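
/*
 * Illustrative startup sketch (not part of the original file): once the
 * application has configured its ethernet ports and filled in the event
 * mode parameters, a single call brings up the event device and both
 * adapters, and restarts the enabled ports:
 *
 *        if (eh_devs_init(conf) < 0)
 *                rte_exit(EXIT_FAILURE, "Failed to init event helper devices");
 */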
1632
1633 int32_t
1634 eh_devs_uninit(struct eh_conf *conf)
1635 {
1636         struct eventmode_conf *em_conf;
1637         int ret, i, j;
1638         uint16_t id;
1639
1640         if (conf == NULL) {
1641                 EH_LOG_ERR("Invalid event helper configuration");
1642                 return -EINVAL;
1643         }
1644
1645         if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
1646                 return 0;
1647
1648         if (conf->mode_params == NULL) {
1649                 EH_LOG_ERR("Invalid event mode parameters");
1650                 return -EINVAL;
1651         }
1652
1653         /* Get eventmode conf */
1654         em_conf = conf->mode_params;
1655
1656         /* Stop and release rx adapters */
1657         for (i = 0; i < em_conf->nb_rx_adapter; i++) {
1658
1659                 id = em_conf->rx_adapter[i].adapter_id;
1660                 ret = rte_event_eth_rx_adapter_stop(id);
1661                 if (ret < 0) {
1662                         EH_LOG_ERR("Failed to stop rx adapter %d", ret);
1663                         return ret;
1664                 }
1665
1666                 for (j = 0; j < em_conf->rx_adapter[i].nb_connections; j++) {
1667
1668                         ret = rte_event_eth_rx_adapter_queue_del(id,
1669                                 em_conf->rx_adapter[i].conn[j].ethdev_id, -1);
1670                         if (ret < 0) {
1671                                 EH_LOG_ERR(
1672                                        "Failed to remove rx adapter queues %d",
1673                                        ret);
1674                                 return ret;
1675                         }
1676                 }
1677
1678                 ret = rte_event_eth_rx_adapter_free(id);
1679                 if (ret < 0) {
1680                         EH_LOG_ERR("Failed to free rx adapter %d", ret);
1681                         return ret;
1682                 }
1683         }
1684
1685         /* Stop and release event devices */
1686         for (i = 0; i < em_conf->nb_eventdev; i++) {
1687
1688                 id = em_conf->eventdev_config[i].eventdev_id;
1689                 rte_event_dev_stop(id);
1690
1691                 ret = rte_event_dev_close(id);
1692                 if (ret < 0) {
1693                         EH_LOG_ERR("Failed to close event dev %d, %d", id, ret);
1694                         return ret;
1695                 }
1696         }
1697
1698         /* Stop and release tx adapters */
1699         for (i = 0; i < em_conf->nb_tx_adapter; i++) {
1700
1701                 id = em_conf->tx_adapter[i].adapter_id;
1702                 ret = rte_event_eth_tx_adapter_stop(id);
1703                 if (ret < 0) {
1704                         EH_LOG_ERR("Failed to stop tx adapter %d", ret);
1705                         return ret;
1706                 }
1707
1708                 for (j = 0; j < em_conf->tx_adapter[i].nb_connections; j++) {
1709
1710                         ret = rte_event_eth_tx_adapter_queue_del(id,
1711                                 em_conf->tx_adapter[i].conn[j].ethdev_id, -1);
1712                         if (ret < 0) {
1713                                 EH_LOG_ERR(
1714                                         "Failed to remove tx adapter queues %d",
1715                                         ret);
1716                                 return ret;
1717                         }
1718                 }
1719
1720                 ret = rte_event_eth_tx_adapter_free(id);
1721                 if (ret < 0) {
1722                         EH_LOG_ERR("Failed to free tx adapter %d", ret);
1723                         return ret;
1724                 }
1725         }
1726
1727         return 0;
1728 }
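
/*
 * Illustrative teardown sketch (not part of the original file): the expected
 * pairing is to release the adapters and event devices first and only then
 * free the configuration, since eh_devs_uninit() still dereferences it:
 *
 *        if (eh_devs_uninit(conf) < 0)
 *                EH_LOG_ERR("Failed to uninit event helper devices");
 *
 *        eh_conf_uninit(conf);
 */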
1729
1730 void
1731 eh_launch_worker(struct eh_conf *conf, struct eh_app_worker_params *app_wrkr,
1732                 uint8_t nb_wrkr_param)
1733 {
1734         struct eh_app_worker_params *match_wrkr;
1735         struct eh_event_link_info *links = NULL;
1736         struct eventmode_conf *em_conf;
1737         uint32_t lcore_id;
1738         uint8_t nb_links;
1739
1740         if (conf == NULL) {
1741                 EH_LOG_ERR("Invalid event helper configuration");
1742                 return;
1743         }
1744
1745         if (conf->mode_params == NULL) {
1746                 EH_LOG_ERR("Invalid event mode parameters");
1747                 return;
1748         }
1749
1750         /* Get eventmode conf */
1751         em_conf = conf->mode_params;
1752
1753         /* Get core ID */
1754         lcore_id = rte_lcore_id();
1755
1756         /* Check if this is eth core */
1757         if (rte_bitmap_get(em_conf->eth_core_mask, lcore_id)) {
1758                 eh_start_worker_eth_core(em_conf, lcore_id);
1759                 return;
1760         }
1761
1762         if (app_wrkr == NULL || nb_wrkr_param == 0) {
1763                 EH_LOG_ERR("Invalid args");
1764                 return;
1765         }
1766
1767         /*
1768          * This is a regular worker thread. The application registers
1769          * multiple workers with various capabilities. Run the worker
1770          * whose capabilities match those of the event device
1771          * configured.
1772          */
1773
1774         /* Get the first matching worker for the event device */
1775         match_wrkr = eh_find_worker(lcore_id, conf, app_wrkr, nb_wrkr_param);
1776         if (match_wrkr == NULL) {
1777                 EH_LOG_ERR("Failed to match worker registered for lcore %d",
1778                            lcore_id);
1779                 goto clean_and_exit;
1780         }
1781
1782         /* Verify sanity of the matched worker */
1783         if (eh_verify_match_worker(match_wrkr) != 1) {
1784                 EH_LOG_ERR("Failed to validate the matched worker");
1785                 goto clean_and_exit;
1786         }
1787
1788         /* Get worker links */
1789         nb_links = eh_get_event_lcore_links(lcore_id, conf, &links);
1790
1791         /* Launch the worker thread */
1792         match_wrkr->worker_thread(links, nb_links);
1793
1794         /* Free links info memory */
1795         free(links);
1796
1797 clean_and_exit:
1798
1799         /* Flag eth_cores to stop, if started */
1800         eh_stop_worker_eth_core();
1801 }
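
/*
 * Illustrative launch sketch (not part of the original file): each lcore
 * would normally enter through a thin wrapper handed to
 * rte_eal_mp_remote_launch(). "app_workers" is a hypothetical array of
 * struct eh_app_worker_params registered by the application; lcores marked
 * as eth cores run the Rx/Tx loop inside eh_launch_worker() instead of an
 * application worker.
 *
 *        static int
 *        app_worker_wrapper(void *args)
 *        {
 *                struct eh_conf *conf = args;
 *
 *                eh_launch_worker(conf, app_workers, RTE_DIM(app_workers));
 *                return 0;
 *        }
 *
 *        rte_eal_mp_remote_launch(app_worker_wrapper, conf, CALL_MASTER);
 */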
1802
1803 uint8_t
1804 eh_get_tx_queue(struct eh_conf *conf, uint8_t eventdev_id)
1805 {
1806         struct eventdev_params *eventdev_config;
1807         struct eventmode_conf *em_conf;
1808
1809         if (conf == NULL) {
1810                 EH_LOG_ERR("Invalid event helper configuration");
1811                 return -EINVAL;
1812         }
1813
1814         if (conf->mode_params == NULL) {
1815                 EH_LOG_ERR("Invalid event mode parameters");
1816                 return -EINVAL;
1817         }
1818
1819         /* Get eventmode conf */
1820         em_conf = conf->mode_params;
1821
1822         /* Get event device conf */
1823         eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);
1824
1825         if (eventdev_config == NULL) {
1826                 EH_LOG_ERR("Failed to read eventdev config");
1827                 return -EINVAL;
1828         }
1829
1830         /*
1831          * The last queue is reserved as the atomic queue for the final
1832          * stage (eth packet Tx stage).
1833          */
1834         return eventdev_config->nb_eventqueue - 1;
1835 }
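
/*
 * Illustrative usage sketch (not part of the original file): a worker that
 * forwards a packet to the Tx stage on its own would target the reserved
 * queue returned above. "ev", "eventdev_id" and "event_port_id" stand in
 * for the worker's own state:
 *
 *        ev.queue_id = eh_get_tx_queue(conf, eventdev_id);
 *        ev.op = RTE_EVENT_OP_FORWARD;
 *        ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *        rte_event_enqueue_burst(eventdev_id, event_port_id, &ev, 1);
 */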