examples/l2fwd-event/l2fwd_event_generic.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>
#include <getopt.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>

#include "l2fwd_common.h"
#include "l2fwd_event.h"

static uint32_t
l2fwd_event_device_setup_generic(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	struct rte_event_dev_config event_d_conf = {
		.nb_events_limit  = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128
	};
	struct rte_event_dev_info dev_info;
	const uint8_t event_d_id = 0; /* Always use first event device only */
	uint32_t event_queue_cfg = 0;
	uint16_t ethdev_count = 0;
	uint16_t num_workers = 0;
	uint16_t port_id;
	int ret;

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		ethdev_count++;
	}

	/* Event device configuration */
	rte_event_dev_info_get(event_d_id, &dev_info);
	evt_rsrc->disable_implicit_release = !!(dev_info.event_dev_cap &
				    RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;

	/* One queue for each ethdev port + one Tx adapter single-link queue. */
	event_d_conf.nb_event_queues = ethdev_count + 1;
	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
		event_d_conf.nb_event_queues = dev_info.max_event_queues;

	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
		event_d_conf.nb_events_limit = dev_info.max_num_events;

	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
		event_d_conf.nb_event_queue_flows =
						dev_info.max_event_queue_flows;

	if (dev_info.max_event_port_dequeue_depth <
				event_d_conf.nb_event_port_dequeue_depth)
		event_d_conf.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;

	if (dev_info.max_event_port_enqueue_depth <
				event_d_conf.nb_event_port_enqueue_depth)
		event_d_conf.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	num_workers = rte_lcore_count() - rte_service_lcore_count();
	if (dev_info.max_event_ports < num_workers)
		num_workers = dev_info.max_event_ports;

	event_d_conf.nb_event_ports = num_workers;
	evt_rsrc->evp.nb_ports = num_workers;
	evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;

	evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
				    RTE_EVENT_DEV_CAP_BURST_MODE);

	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
	if (ret < 0)
		rte_panic("Error in configuring event device\n");

	evt_rsrc->event_d_id = event_d_id;
	return event_queue_cfg;
}
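
/*
 * Illustrative sketch, not part of the upstream file: once the queue, port
 * and adapter setup below completes, the caller must start the event device
 * before any enqueue/dequeue can happen. A minimal sketch, assuming the
 * same first-device convention used above:
 */
static __rte_unused void
l2fwd_event_device_start_sketch(struct l2fwd_resources *rsrc)
{
	/* event_d_id was stored by l2fwd_event_device_setup_generic(). */
	if (rte_event_dev_start(rsrc->evt_rsrc->event_d_id) < 0)
		rte_panic("Error in starting eventdev\n");
}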

static void
l2fwd_event_port_setup_generic(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_port_conf event_p_conf = {
		.dequeue_depth = 32,
		.enqueue_depth = 32,
		.new_event_threshold = 4096
	};
	struct rte_event_port_conf def_p_conf;
	uint8_t event_p_id;
	int32_t ret;

	evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evp.nb_ports);
	if (!evt_rsrc->evp.event_p_id)
		rte_panic("Failed to allocate memory for event ports\n");

	memset(&def_p_conf, 0, sizeof(struct rte_event_port_conf));
	rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);

	if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
		event_p_conf.new_event_threshold =
			def_p_conf.new_event_threshold;

	if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
		event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;

	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;

	event_p_conf.disable_implicit_release =
		evt_rsrc->disable_implicit_release;
	evt_rsrc->deq_depth = def_p_conf.dequeue_depth;

	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
								event_p_id++) {
		ret = rte_event_port_setup(event_d_id, event_p_id,
					   &event_p_conf);
		if (ret < 0)
			rte_panic("Error in configuring event port %d\n",
				  event_p_id);

		ret = rte_event_port_link(event_d_id, event_p_id,
					  evt_rsrc->evq.event_q_id,
					  NULL,
					  evt_rsrc->evq.nb_queues - 1);
		if (ret != (evt_rsrc->evq.nb_queues - 1))
			rte_panic("Error in linking event port %d to queues\n",
				  event_p_id);
		evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
	}
	/* init spinlock */
	rte_spinlock_init(&evt_rsrc->evp.lock);

	evt_rsrc->def_p_conf = event_p_conf;
}
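
/*
 * Illustrative sketch, not part of the upstream file: each worker lcore
 * services one of the event ports configured above. The loop assumes the
 * force_quit flag from struct l2fwd_resources (l2fwd_common.h) and the
 * single-link Tx queue created last by the queue setup below; the actual
 * forwarding logic (MAC rewrite, destination port lookup) lives in
 * l2fwd_event.c and is omitted here.
 */
static __rte_unused void
l2fwd_event_worker_sketch(struct l2fwd_resources *rsrc, uint8_t event_p_id)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint8_t tx_q_id =
		evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
	struct rte_event ev;

	while (!rsrc->force_quit) {
		if (!rte_event_dequeue_burst(event_d_id, event_p_id,
					     &ev, 1, 0))
			continue;
		/* Packet processing would happen here. */
		ev.queue_id = tx_q_id;
		ev.op = RTE_EVENT_OP_FORWARD;
		while (!rte_event_enqueue_burst(event_d_id, event_p_id,
						&ev, 1) && !rsrc->force_quit)
			; /* Retry until the event is accepted. */
	}
}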

static void
l2fwd_event_queue_setup_generic(struct l2fwd_resources *rsrc,
			  uint32_t event_queue_cfg)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_queue_conf event_q_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
		.event_queue_cfg = event_queue_cfg,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
	struct rte_event_queue_conf def_q_conf;
	uint8_t event_q_id;
	int32_t ret;

	event_q_conf.schedule_type = rsrc->sched_type;
	evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evq.nb_queues);
	if (!evt_rsrc->evq.event_q_id)
		rte_panic("Memory allocation failure\n");

	rte_event_queue_default_conf_get(event_d_id, 0, &def_q_conf);
	if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
		event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;

	for (event_q_id = 0; event_q_id < (evt_rsrc->evq.nb_queues - 1);
								event_q_id++) {
		ret = rte_event_queue_setup(event_d_id, event_q_id,
					    &event_q_conf);
		if (ret < 0)
			rte_panic("Error in configuring event queue\n");
		evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
	}

	event_q_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
	event_q_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	ret = rte_event_queue_setup(event_d_id, event_q_id, &event_q_conf);
	if (ret < 0)
		rte_panic("Error in configuring event queue for Tx adapter\n");
	evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
}
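
/*
 * Illustrative sketch, not part of the upstream file: the layout produced
 * above is one queue per ethdev port plus a SINGLE_LINK queue at the end
 * for the Tx adapter. A debug helper dumping each queue's priority via the
 * standard attribute API (printf is available through the rte_* headers
 * already included):
 */
static __rte_unused void
l2fwd_event_queue_dump_sketch(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	uint32_t prio;
	uint8_t i;

	for (i = 0; i < evt_rsrc->evq.nb_queues; i++) {
		rte_event_queue_attr_get(evt_rsrc->event_d_id,
					 evt_rsrc->evq.event_q_id[i],
					 RTE_EVENT_QUEUE_ATTR_PRIORITY,
					 &prio);
		printf("queue %u: priority %u%s\n", i, prio,
		       (i == evt_rsrc->evq.nb_queues - 1) ?
				" (Tx adapter single-link)" : "");
	}
}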

static void
l2fwd_rx_tx_adapter_setup_generic(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	uint8_t rx_adptr_id = 0;
	uint8_t tx_adptr_id = 0;
	uint8_t tx_port_id = 0;
	uint16_t port_id;
	uint32_t service_id;
	int32_t ret, i = 0;

	memset(&eth_q_conf, 0, sizeof(eth_q_conf));
	eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	/* Rx adapter setup */
	evt_rsrc->rx_adptr.nb_rx_adptr = 1;
	evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->rx_adptr.nb_rx_adptr);
	if (!evt_rsrc->rx_adptr.rx_adptr) {
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Rx adapter\n");
	}

	ret = rte_event_eth_rx_adapter_create(rx_adptr_id, event_d_id,
					      &evt_rsrc->def_p_conf);
	if (ret)
		rte_panic("Failed to create rx adapter\n");

	/* Configure user requested sched type */
	eth_q_conf.ev.sched_type = rsrc->sched_type;
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[i];
		ret = rte_event_eth_rx_adapter_queue_add(rx_adptr_id, port_id,
							 -1, &eth_q_conf);
		if (ret)
			rte_panic("Failed to add queues to Rx adapter\n");
		if (i < evt_rsrc->evq.nb_queues)
			i++;
	}

	ret = rte_event_eth_rx_adapter_service_id_get(rx_adptr_id, &service_id);
	if (ret != -ESRCH && ret != 0)
		rte_panic("Error getting the service ID for rx adptr\n");

	rte_service_runstate_set(service_id, 1);
	rte_service_set_runstate_mapped_check(service_id, 0);
	evt_rsrc->rx_adptr.service_id = service_id;

	ret = rte_event_eth_rx_adapter_start(rx_adptr_id);
	if (ret)
		rte_panic("Rx adapter[%d] start failed\n", rx_adptr_id);

	evt_rsrc->rx_adptr.rx_adptr[0] = rx_adptr_id;

	/* Tx adapter setup */
	evt_rsrc->tx_adptr.nb_tx_adptr = 1;
	evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->tx_adptr.nb_tx_adptr);
	if (!evt_rsrc->tx_adptr.tx_adptr) {
		free(evt_rsrc->rx_adptr.rx_adptr);
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Tx adapter\n");
	}

	ret = rte_event_eth_tx_adapter_create(tx_adptr_id, event_d_id,
					      &evt_rsrc->def_p_conf);
	if (ret)
		rte_panic("Failed to create tx adapter\n");

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		ret = rte_event_eth_tx_adapter_queue_add(tx_adptr_id, port_id,
							 -1);
		if (ret)
			rte_panic("Failed to add queues to Tx adapter\n");
	}

	ret = rte_event_eth_tx_adapter_service_id_get(tx_adptr_id, &service_id);
	if (ret != -ESRCH && ret != 0)
		rte_panic("Failed to get Tx adapter service ID\n");

	rte_service_runstate_set(service_id, 1);
	rte_service_set_runstate_mapped_check(service_id, 0);
	evt_rsrc->tx_adptr.service_id = service_id;

	ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id, &tx_port_id);
	if (ret)
		rte_panic("Failed to get Tx adapter port id: %d\n", ret);

	ret = rte_event_port_link(event_d_id, tx_port_id,
				  &evt_rsrc->evq.event_q_id[
					evt_rsrc->evq.nb_queues - 1],
				  NULL, 1);
	if (ret != 1)
		rte_panic("Unable to link Tx adapter port to Tx queue:err=%d\n",
			 ret);

	ret = rte_event_eth_tx_adapter_start(tx_adptr_id);
	if (ret)
		rte_panic("Tx adapter[%d] start failed\n", tx_adptr_id);

	evt_rsrc->tx_adptr.tx_adptr[0] = tx_adptr_id;
}
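
/*
 * Illustrative sketch, not part of the upstream file: because
 * rte_service_set_runstate_mapped_check() is cleared above, the adapter
 * services can be driven directly from an application lcore instead of a
 * dedicated service core. The force_quit flag is assumed from
 * l2fwd_common.h:
 */
static __rte_unused void
l2fwd_adapter_service_run_sketch(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;

	while (!rsrc->force_quit) {
		/* One iteration of the Rx, then the Tx, adapter service. */
		rte_service_run_iter_on_app_lcore(
				evt_rsrc->rx_adptr.service_id, 0);
		rte_service_run_iter_on_app_lcore(
				evt_rsrc->tx_adptr.service_id, 0);
	}
}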

void
l2fwd_event_set_generic_ops(struct event_setup_ops *ops)
{
	ops->event_device_setup = l2fwd_event_device_setup_generic;
	ops->event_queue_setup = l2fwd_event_queue_setup_generic;
	ops->event_port_setup = l2fwd_event_port_setup_generic;
	ops->adapter_setup = l2fwd_rx_tx_adapter_setup_generic;
}
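
/*
 * Illustrative sketch, not part of the upstream file: the caller (see
 * l2fwd_event.c) fills the ops table and invokes the hooks in dependency
 * order. A condensed version of that flow:
 */
static __rte_unused void
l2fwd_event_resource_setup_sketch(struct l2fwd_resources *rsrc)
{
	struct event_setup_ops ops;
	uint32_t event_queue_cfg;

	l2fwd_event_set_generic_ops(&ops);
	event_queue_cfg = ops.event_device_setup(rsrc);
	ops.event_queue_setup(rsrc, event_queue_cfg);
	ops.event_port_setup(rsrc);
	ops.adapter_setup(rsrc);
}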