dpdk.git — examples/l2fwd-event/l2fwd_event.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2019 Marvell International Ltd.
3  */
4
5 #include <stdbool.h>
6 #include <getopt.h>
7
8 #include <rte_cycles.h>
9 #include <rte_ethdev.h>
10 #include <rte_eventdev.h>
11 #include <rte_event_eth_rx_adapter.h>
12 #include <rte_event_eth_tx_adapter.h>
13 #include <rte_lcore.h>
14 #include <rte_malloc.h>
15 #include <rte_spinlock.h>
16
17 #include "l2fwd_event.h"
18
19 #define L2FWD_EVENT_SINGLE      0x1
20 #define L2FWD_EVENT_BURST       0x2
21 #define L2FWD_EVENT_TX_DIRECT   0x4
22 #define L2FWD_EVENT_TX_ENQ      0x8
23 #define L2FWD_EVENT_UPDT_MAC    0x10
24
25 static inline int
26 l2fwd_event_service_enable(uint32_t service_id)
27 {
28         uint8_t min_service_count = UINT8_MAX;
29         uint32_t slcore_array[RTE_MAX_LCORE];
30         unsigned int slcore = 0;
31         uint8_t service_count;
32         int32_t slcore_count;
33
34         if (!rte_service_lcore_count())
35                 return -ENOENT;
36
37         slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
38         if (slcore_count < 0)
39                 return -ENOENT;
40         /* Get the core which has least number of services running. */
41         while (slcore_count--) {
42                 /* Reset default mapping */
43                 if (rte_service_map_lcore_set(service_id,
44                                         slcore_array[slcore_count], 0) != 0)
45                         return -ENOENT;
46                 service_count = rte_service_lcore_count_services(
47                                 slcore_array[slcore_count]);
48                 if (service_count < min_service_count) {
49                         slcore = slcore_array[slcore_count];
50                         min_service_count = service_count;
51                 }
52         }
53         if (rte_service_map_lcore_set(service_id, slcore, 1) != 0)
54                 return -ENOENT;
55         rte_service_lcore_start(slcore);
56
57         return 0;
58 }
59
60 void
61 l2fwd_event_service_setup(struct l2fwd_resources *rsrc)
62 {
63         struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
64         struct rte_event_dev_info evdev_info;
65         uint32_t service_id, caps;
66         int ret, i;
67
68         /* Running eventdev scheduler service on service core. 8< */
69         rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
70         if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
71                 ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
72                                 &service_id);
73                 if (ret != -ESRCH && ret != 0)
74                         rte_panic("Error in starting eventdev service\n");
75                 l2fwd_event_service_enable(service_id);
76         }
77         /* >8 End of running eventdev scheduler service on service core. */
78
79         /* Gets service ID for RX/TX adapters. 8< */
80         for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
81                 ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
82                                 evt_rsrc->rx_adptr.rx_adptr[i], &caps);
83                 if (ret < 0)
84                         rte_panic("Failed to get Rx adapter[%d] caps\n",
85                                   evt_rsrc->rx_adptr.rx_adptr[i]);
86                 ret = rte_event_eth_rx_adapter_service_id_get(
87                                 evt_rsrc->event_d_id,
88                                 &service_id);
89                 if (ret != -ESRCH && ret != 0)
90                         rte_panic("Error in starting Rx adapter[%d] service\n",
91                                   evt_rsrc->rx_adptr.rx_adptr[i]);
92                 l2fwd_event_service_enable(service_id);
93         }
94
95         for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
96                 ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
97                                 evt_rsrc->tx_adptr.tx_adptr[i], &caps);
98                 if (ret < 0)
99                         rte_panic("Failed to get Rx adapter[%d] caps\n",
100                                   evt_rsrc->tx_adptr.tx_adptr[i]);
101                 ret = rte_event_eth_tx_adapter_service_id_get(
102                                 evt_rsrc->event_d_id,
103                                 &service_id);
104                 if (ret != -ESRCH && ret != 0)
105                         rte_panic("Error in starting Rx adapter[%d] service\n",
106                                   evt_rsrc->tx_adptr.tx_adptr[i]);
107                 l2fwd_event_service_enable(service_id);
108         }
109         /* >8 End of get service ID for RX/TX adapters. */
110 }
111
112 static void
113 l2fwd_event_capability_setup(struct l2fwd_event_resources *evt_rsrc)
114 {
115         uint32_t caps = 0;
116         uint16_t i;
117         int ret;
118
119         RTE_ETH_FOREACH_DEV(i) {
120                 ret = rte_event_eth_tx_adapter_caps_get(0, i, &caps);
121                 if (ret)
122                         rte_panic("Invalid capability for Tx adptr port %d\n",
123                                   i);
124
125                 evt_rsrc->tx_mode_q |= !(caps &
126                                    RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
127         }
128
129         if (evt_rsrc->tx_mode_q)
130                 l2fwd_event_set_generic_ops(&evt_rsrc->ops);
131         else
132                 l2fwd_event_set_internal_port_ops(&evt_rsrc->ops);
133 }
134
135 static __rte_noinline int
136 l2fwd_get_free_event_port(struct l2fwd_event_resources *evt_rsrc)
137 {
138         static int index;
139         int port_id;
140
141         rte_spinlock_lock(&evt_rsrc->evp.lock);
142         if (index >= evt_rsrc->evp.nb_ports) {
143                 printf("No free event port is available\n");
144                 return -1;
145         }
146
147         port_id = evt_rsrc->evp.event_p_id[index];
148         index++;
149         rte_spinlock_unlock(&evt_rsrc->evp.lock);
150
151         return port_id;
152 }
153
154 static  __rte_always_inline void
155 l2fwd_event_fwd(struct l2fwd_resources *rsrc, struct rte_event *ev,
156                 const uint8_t tx_q_id, const uint64_t timer_period,
157                 const uint32_t flags)
158 {
159         struct rte_mbuf *mbuf = ev->mbuf;
160         uint16_t dst_port;
161
162         rte_prefetch0(rte_pktmbuf_mtod(mbuf, void *));
163         dst_port = rsrc->dst_ports[mbuf->port];
164
165         if (timer_period > 0)
166                 __atomic_fetch_add(&rsrc->port_stats[mbuf->port].rx,
167                                 1, __ATOMIC_RELAXED);
168         mbuf->port = dst_port;
169
170         if (flags & L2FWD_EVENT_UPDT_MAC)
171                 l2fwd_mac_updating(mbuf, dst_port, &rsrc->eth_addr[dst_port]);
172
173         if (flags & L2FWD_EVENT_TX_ENQ) {
174                 ev->queue_id = tx_q_id;
175                 ev->op = RTE_EVENT_OP_FORWARD;
176         }
177
178         if (flags & L2FWD_EVENT_TX_DIRECT)
179                 rte_event_eth_tx_adapter_txq_set(mbuf, 0);
180
181         if (timer_period > 0)
182                 __atomic_fetch_add(&rsrc->port_stats[mbuf->port].tx,
183                                 1, __ATOMIC_RELAXED);
184 }
185
/*
 * Worker loop: dequeue and forward one event at a time. Runs until
 * rsrc->force_quit is set, then drains state via the cleanup helper.
 */
static __rte_always_inline void
l2fwd_event_loop_single(struct l2fwd_resources *rsrc,
			const uint32_t flags)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	const int port_id = l2fwd_get_free_event_port(evt_rsrc);
	/* Tx queue id is the last configured event queue. */
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
					evt_rsrc->evq.nb_queues - 1];
	const uint64_t timer_period = rsrc->timer_period;
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	uint8_t enq = 0, deq = 0;
	struct rte_event ev;

	/* No free event port left for this lcore: nothing to do. */
	if (port_id < 0)
		return;

	printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
		rte_lcore_id());

	while (!rsrc->force_quit) {
		/* Read packet from eventdev */
		deq = rte_event_dequeue_burst(event_d_id, port_id, &ev, 1, 0);
		if (!deq)
			continue;

		l2fwd_event_fwd(rsrc, &ev, tx_q_id, timer_period, flags);

		/* Retry the enqueue until it succeeds or shutdown starts. */
		if (flags & L2FWD_EVENT_TX_ENQ) {
			do {
				enq = rte_event_enqueue_burst(event_d_id,
							      port_id, &ev, 1);
			} while (!enq && !rsrc->force_quit);
		}

		if (flags & L2FWD_EVENT_TX_DIRECT) {
			do {
				enq = rte_event_eth_tx_adapter_enqueue(
					event_d_id, port_id, &ev, 1, 0);
			} while (!enq && !rsrc->force_quit);
		}
	}

	/* enq/deq tell the cleanup helper whether ev is still in flight. */
	l2fwd_event_worker_cleanup(event_d_id, port_id, &ev, enq, deq, 0);
}
230
/*
 * Worker loop: dequeue events in bursts of up to deq_depth and forward
 * them, retrying partial enqueues until done or shutdown is requested.
 */
static __rte_always_inline void
l2fwd_event_loop_burst(struct l2fwd_resources *rsrc,
		       const uint32_t flags)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	const int port_id = l2fwd_get_free_event_port(evt_rsrc);
	/* Tx queue id is the last configured event queue. */
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
					evt_rsrc->evq.nb_queues - 1];
	const uint64_t timer_period = rsrc->timer_period;
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint8_t deq_len = evt_rsrc->deq_depth;
	struct rte_event ev[MAX_PKT_BURST];
	uint16_t nb_rx = 0, nb_tx = 0;
	uint8_t i;

	/* No free event port left for this lcore: nothing to do. */
	if (port_id < 0)
		return;

	printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
		rte_lcore_id());

	while (!rsrc->force_quit) {
		/* Read packet from eventdev. 8< */
		nb_rx = rte_event_dequeue_burst(event_d_id, port_id, ev,
						deq_len, 0);
		if (nb_rx == 0)
			continue;

		for (i = 0; i < nb_rx; i++) {
			l2fwd_event_fwd(rsrc, &ev[i], tx_q_id, timer_period,
					flags);
		}
		/* >8 End of reading packets from eventdev. */

		if (flags & L2FWD_EVENT_TX_ENQ) {
			/* Forwarding to destination ports. 8< */
			nb_tx = rte_event_enqueue_burst(event_d_id, port_id,
							ev, nb_rx);
			/* Re-enqueue the unsent tail until all are accepted. */
			while (nb_tx < nb_rx && !rsrc->force_quit)
				nb_tx += rte_event_enqueue_burst(event_d_id,
						port_id, ev + nb_tx,
						nb_rx - nb_tx);
			/* >8 End of forwarding to destination ports. */
		}

		if (flags & L2FWD_EVENT_TX_DIRECT) {
			nb_tx = rte_event_eth_tx_adapter_enqueue(event_d_id,
								 port_id, ev,
								 nb_rx, 0);
			while (nb_tx < nb_rx && !rsrc->force_quit)
				nb_tx += rte_event_eth_tx_adapter_enqueue(
						event_d_id, port_id,
						ev + nb_tx, nb_rx - nb_tx, 0);
		}
	}

	/* nb_rx/nb_tx tell the cleanup helper how much is still in flight. */
	l2fwd_event_worker_cleanup(event_d_id, port_id, ev, nb_rx, nb_tx, 0);
}
289
290 static __rte_always_inline void
291 l2fwd_event_loop(struct l2fwd_resources *rsrc,
292                         const uint32_t flags)
293 {
294         if (flags & L2FWD_EVENT_SINGLE)
295                 l2fwd_event_loop_single(rsrc, flags);
296         if (flags & L2FWD_EVENT_BURST)
297                 l2fwd_event_loop_burst(rsrc, flags);
298 }
299
300 static void __rte_noinline
301 l2fwd_event_main_loop_tx_d(struct l2fwd_resources *rsrc)
302 {
303         l2fwd_event_loop(rsrc,
304                          L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_SINGLE);
305 }
306
307 static void __rte_noinline
308 l2fwd_event_main_loop_tx_d_brst(struct l2fwd_resources *rsrc)
309 {
310         l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_BURST);
311 }
312
313 static void __rte_noinline
314 l2fwd_event_main_loop_tx_q(struct l2fwd_resources *rsrc)
315 {
316         l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_SINGLE);
317 }
318
319 static void __rte_noinline
320 l2fwd_event_main_loop_tx_q_brst(struct l2fwd_resources *rsrc)
321 {
322         l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
323 }
324
325 static void __rte_noinline
326 l2fwd_event_main_loop_tx_d_mac(struct l2fwd_resources *rsrc)
327 {
328         l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
329                         L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_SINGLE);
330 }
331
332 static void __rte_noinline
333 l2fwd_event_main_loop_tx_d_brst_mac(struct l2fwd_resources *rsrc)
334 {
335         l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
336                         L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_BURST);
337 }
338
339 static void __rte_noinline
340 l2fwd_event_main_loop_tx_q_mac(struct l2fwd_resources *rsrc)
341 {
342         l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
343                         L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_SINGLE);
344 }
345
346 static void __rte_noinline
347 l2fwd_event_main_loop_tx_q_brst_mac(struct l2fwd_resources *rsrc)
348 {
349         l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
350                         L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
351 }
352
/*
 * Forward every mbuf in an event vector: choose egress ports, optionally
 * rewrite MACs, set the Tx queue for direct-Tx mode and update stats.
 * Takes a fast path when the vector's port/queue attributes are valid.
 */
static __rte_always_inline void
l2fwd_event_vector_fwd(struct l2fwd_resources *rsrc,
		       struct rte_event_vector *vec,
		       const uint64_t timer_period, const uint32_t flags)
{
	struct rte_mbuf **mbufs = vec->mbufs;
	uint16_t i, j;

	rte_prefetch0(rte_pktmbuf_mtod(mbufs[0], void *));

	/* If vector attribute is valid, mbufs will be from same port/queue */
	if (vec->attr_valid) {
		/* One egress port/queue decision covers the whole vector. */
		vec->port = rsrc->dst_ports[mbufs[0]->port];
		if (flags & L2FWD_EVENT_TX_DIRECT)
			vec->queue = 0;

		/* Rx stats accounted once for the whole burst. */
		if (timer_period > 0)
			__atomic_fetch_add(&rsrc->port_stats[mbufs[0]->port].rx,
					   vec->nb_elem, __ATOMIC_RELAXED);

		/* j runs one ahead of i to prefetch the next mbuf's data. */
		for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
			if (j < vec->nb_elem)
				rte_prefetch0(
					rte_pktmbuf_mtod(mbufs[j], void *));

			if (flags & L2FWD_EVENT_UPDT_MAC)
				l2fwd_mac_updating(
					mbufs[i], vec->port,
					&rsrc->eth_addr[vec->port]);
		}

		if (timer_period > 0)
			__atomic_fetch_add(&rsrc->port_stats[vec->port].tx,
					   vec->nb_elem, __ATOMIC_RELAXED);
	} else {
		/* Mixed ports: resolve egress and stats per mbuf. */
		for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
			/* Rx stat uses the ingress port before the rewrite. */
			if (timer_period > 0)
				__atomic_fetch_add(
					&rsrc->port_stats[mbufs[i]->port].rx, 1,
					__ATOMIC_RELAXED);

			if (j < vec->nb_elem)
				rte_prefetch0(
					rte_pktmbuf_mtod(mbufs[j], void *));

			mbufs[i]->port = rsrc->dst_ports[mbufs[i]->port];

			if (flags & L2FWD_EVENT_UPDT_MAC)
				l2fwd_mac_updating(
					mbufs[i], mbufs[i]->port,
					&rsrc->eth_addr[mbufs[i]->port]);

			/* Direct Tx always transmits on queue 0. */
			if (flags & L2FWD_EVENT_TX_DIRECT)
				rte_event_eth_tx_adapter_txq_set(mbufs[i], 0);

			if (timer_period > 0)
				__atomic_fetch_add(
					&rsrc->port_stats[mbufs[i]->port].tx, 1,
					__ATOMIC_RELAXED);
		}
	}
}
415
/*
 * Worker loop for event-vector mode: dequeue bursts of vector events,
 * forward each vector's mbufs and send via the configured Tx path.
 */
static __rte_always_inline void
l2fwd_event_loop_vector(struct l2fwd_resources *rsrc, const uint32_t flags)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	const int port_id = l2fwd_get_free_event_port(evt_rsrc);
	/* Tx queue id is the last configured event queue. */
	const uint8_t tx_q_id =
		evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
	const uint64_t timer_period = rsrc->timer_period;
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint8_t deq_len = evt_rsrc->deq_depth;
	struct rte_event ev[MAX_PKT_BURST];
	uint16_t nb_rx = 0, nb_tx = 0;
	uint8_t i;

	/* No free event port left for this lcore: nothing to do. */
	if (port_id < 0)
		return;

	printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
	       rte_lcore_id());

	while (!rsrc->force_quit) {
		nb_rx = rte_event_dequeue_burst(event_d_id, port_id, ev,
						deq_len, 0);
		if (nb_rx == 0)
			continue;

		for (i = 0; i < nb_rx; i++) {
			/* Tx-enqueue mode: route events to the Tx queue. */
			if (flags & L2FWD_EVENT_TX_ENQ) {
				ev[i].queue_id = tx_q_id;
				ev[i].op = RTE_EVENT_OP_FORWARD;
			}

			l2fwd_event_vector_fwd(rsrc, ev[i].vec, timer_period,
					       flags);
		}

		/* Retry partial enqueues until done or shutdown starts. */
		if (flags & L2FWD_EVENT_TX_ENQ) {
			nb_tx = rte_event_enqueue_burst(event_d_id, port_id, ev,
							nb_rx);
			while (nb_tx < nb_rx && !rsrc->force_quit)
				nb_tx += rte_event_enqueue_burst(
					event_d_id, port_id, ev + nb_tx,
					nb_rx - nb_tx);
		}

		if (flags & L2FWD_EVENT_TX_DIRECT) {
			nb_tx = rte_event_eth_tx_adapter_enqueue(
				event_d_id, port_id, ev, nb_rx, 0);
			while (nb_tx < nb_rx && !rsrc->force_quit)
				nb_tx += rte_event_eth_tx_adapter_enqueue(
					event_d_id, port_id, ev + nb_tx,
					nb_rx - nb_tx, 0);
		}
	}

	/* Final argument 1: events carry vectors, cleanup frees them too. */
	l2fwd_event_worker_cleanup(event_d_id, port_id, ev, nb_rx, nb_tx, 1);
}
473
474 static void __rte_noinline
475 l2fwd_event_main_loop_tx_d_vec(struct l2fwd_resources *rsrc)
476 {
477         l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_DIRECT);
478 }
479
480 static void __rte_noinline
481 l2fwd_event_main_loop_tx_d_brst_vec(struct l2fwd_resources *rsrc)
482 {
483         l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_DIRECT);
484 }
485
486 static void __rte_noinline
487 l2fwd_event_main_loop_tx_q_vec(struct l2fwd_resources *rsrc)
488 {
489         l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_ENQ);
490 }
491
492 static void __rte_noinline
493 l2fwd_event_main_loop_tx_q_brst_vec(struct l2fwd_resources *rsrc)
494 {
495         l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_ENQ);
496 }
497
498 static void __rte_noinline
499 l2fwd_event_main_loop_tx_d_mac_vec(struct l2fwd_resources *rsrc)
500 {
501         l2fwd_event_loop_vector(rsrc,
502                                 L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_DIRECT);
503 }
504
505 static void __rte_noinline
506 l2fwd_event_main_loop_tx_d_brst_mac_vec(struct l2fwd_resources *rsrc)
507 {
508         l2fwd_event_loop_vector(rsrc,
509                                 L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_DIRECT);
510 }
511
512 static void __rte_noinline
513 l2fwd_event_main_loop_tx_q_mac_vec(struct l2fwd_resources *rsrc)
514 {
515         l2fwd_event_loop_vector(rsrc,
516                                 L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_ENQ);
517 }
518
519 static void __rte_noinline
520 l2fwd_event_main_loop_tx_q_brst_mac_vec(struct l2fwd_resources *rsrc)
521 {
522         l2fwd_event_loop_vector(rsrc,
523                                 L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_ENQ);
524 }
525
526 void
527 l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
528 {
529         /* [MAC_UPDT][TX_MODE][BURST] */
530         const event_loop_cb event_loop[2][2][2][2] = {
531                 [0][0][0][0] = l2fwd_event_main_loop_tx_d,
532                 [0][0][0][1] = l2fwd_event_main_loop_tx_d_brst,
533                 [0][0][1][0] = l2fwd_event_main_loop_tx_q,
534                 [0][0][1][1] = l2fwd_event_main_loop_tx_q_brst,
535                 [0][1][0][0] = l2fwd_event_main_loop_tx_d_mac,
536                 [0][1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac,
537                 [0][1][1][0] = l2fwd_event_main_loop_tx_q_mac,
538                 [0][1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac,
539                 [1][0][0][0] = l2fwd_event_main_loop_tx_d_vec,
540                 [1][0][0][1] = l2fwd_event_main_loop_tx_d_brst_vec,
541                 [1][0][1][0] = l2fwd_event_main_loop_tx_q_vec,
542                 [1][0][1][1] = l2fwd_event_main_loop_tx_q_brst_vec,
543                 [1][1][0][0] = l2fwd_event_main_loop_tx_d_mac_vec,
544                 [1][1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac_vec,
545                 [1][1][1][0] = l2fwd_event_main_loop_tx_q_mac_vec,
546                 [1][1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac_vec,
547         };
548         struct l2fwd_event_resources *evt_rsrc;
549         uint32_t event_queue_cfg;
550         int ret;
551
552         if (!rte_event_dev_count())
553                 rte_panic("No Eventdev found\n");
554
555         evt_rsrc = rte_zmalloc("l2fwd_event",
556                                  sizeof(struct l2fwd_event_resources), 0);
557         if (evt_rsrc == NULL)
558                 rte_panic("Failed to allocate memory\n");
559
560         rsrc->evt_rsrc = evt_rsrc;
561
562         /* Setup eventdev capability callbacks */
563         l2fwd_event_capability_setup(evt_rsrc);
564
565         /* Event device configuration */
566         event_queue_cfg = evt_rsrc->ops.event_device_setup(rsrc);
567
568         /* Event queue configuration */
569         evt_rsrc->ops.event_queue_setup(rsrc, event_queue_cfg);
570
571         /* Event port configuration */
572         evt_rsrc->ops.event_port_setup(rsrc);
573
574         /* Rx/Tx adapters configuration */
575         evt_rsrc->ops.adapter_setup(rsrc);
576
577         /* Start event device */
578         ret = rte_event_dev_start(evt_rsrc->event_d_id);
579         if (ret < 0)
580                 rte_panic("Error in starting eventdev\n");
581
582         evt_rsrc->ops.l2fwd_event_loop =
583                 event_loop[rsrc->evt_vec.enabled][rsrc->mac_updating]
584                           [evt_rsrc->tx_mode_q][evt_rsrc->has_burst];
585 }