/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>
#include <getopt.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>

#include "l2fwd_event.h"
#define L2FWD_EVENT_SINGLE	0x1
#define L2FWD_EVENT_BURST	0x2
#define L2FWD_EVENT_TX_DIRECT	0x4
#define L2FWD_EVENT_TX_ENQ	0x8
#define L2FWD_EVENT_UPDT_MAC	0x10
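
/*
 * Worker variants are built by ORing these single-bit flags and passing
 * the result as a compile-time constant into the __rte_always_inline loop
 * bodies below, so each combination compiles to a specialized loop with
 * the untaken branches eliminated.
 */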
static inline int
l2fwd_event_service_enable(uint32_t service_id)
{
	uint8_t min_service_count = UINT8_MAX;
	uint32_t slcore_array[RTE_MAX_LCORE];
	unsigned int slcore = 0;
	uint8_t service_count;
	int32_t slcore_count;

	if (!rte_service_lcore_count())
		return -ENOENT;

	slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
	if (slcore_count < 0)
		return -ENOENT;

	/* Get the core which has the least number of services running. */
	while (slcore_count--) {
		/* Reset the default mapping. */
		if (rte_service_map_lcore_set(service_id,
				slcore_array[slcore_count], 0) != 0)
			return -ENOENT;

		service_count = rte_service_lcore_count_services(
				slcore_array[slcore_count]);
		if (service_count < min_service_count) {
			slcore = slcore_array[slcore_count];
			min_service_count = service_count;
		}
	}

	if (rte_service_map_lcore_set(service_id, slcore, 1) != 0)
		return -ENOENT;

	rte_service_lcore_start(slcore);

	return 0;
}
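
/*
 * A software event scheduler has no dedicated hardware thread, so its
 * service is mapped onto the service lcore currently running the fewest
 * services. Service lcores must have been reserved at startup (for
 * example with the EAL -s service-coremask option); otherwise this
 * returns -ENOENT.
 */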
void
l2fwd_event_service_setup(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	struct rte_event_dev_info evdev_info;
	uint32_t service_id, caps;
	int ret, i;

	/* Running eventdev scheduler service on service core. 8< */
	rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
	if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
		ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
				&service_id);
		if (ret != -ESRCH && ret != 0)
			rte_panic("Error in starting eventdev service\n");
		l2fwd_event_service_enable(service_id);
	}
	/* >8 End of running eventdev scheduler service on service core. */

	/* Gets service ID for RX/TX adapters. 8< */
	for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
		ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
				evt_rsrc->rx_adptr.rx_adptr[i], &caps);
		if (ret < 0)
			rte_panic("Failed to get Rx adapter[%d] caps\n",
				  evt_rsrc->rx_adptr.rx_adptr[i]);
		ret = rte_event_eth_rx_adapter_service_id_get(
				evt_rsrc->rx_adptr.rx_adptr[i],
				&service_id);
		if (ret != -ESRCH && ret != 0)
			rte_panic("Error in starting Rx adapter[%d] service\n",
				  evt_rsrc->rx_adptr.rx_adptr[i]);
		l2fwd_event_service_enable(service_id);
	}

	for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
		ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
				evt_rsrc->tx_adptr.tx_adptr[i], &caps);
		if (ret < 0)
			rte_panic("Failed to get Tx adapter[%d] caps\n",
				  evt_rsrc->tx_adptr.tx_adptr[i]);
		ret = rte_event_eth_tx_adapter_service_id_get(
				evt_rsrc->tx_adptr.tx_adptr[i],
				&service_id);
		if (ret != -ESRCH && ret != 0)
			rte_panic("Error in starting Tx adapter[%d] service\n",
				  evt_rsrc->tx_adptr.tx_adptr[i]);
		l2fwd_event_service_enable(service_id);
	}
	/* >8 End of get service ID for RX/TX adapters. */
}
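
/*
 * The three blocks above share one capability-driven pattern: query the
 * component first and bind a service core only when it lacks an internal
 * port or scheduler. -ESRCH from the *_service_id_get() calls is benign;
 * it simply means the component needs no service core.
 */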
static void
l2fwd_event_capability_setup(struct l2fwd_event_resources *evt_rsrc)
{
	uint32_t caps = 0;
	uint16_t i;
	int ret;

	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_tx_adapter_caps_get(0, i, &caps);
		if (ret)
			rte_panic("Invalid capability for Tx adptr port %d\n",
				  i);
		evt_rsrc->tx_mode_q |= !(caps &
				RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
	}

	if (evt_rsrc->tx_mode_q)
		l2fwd_event_set_generic_ops(&evt_rsrc->ops);
	else
		l2fwd_event_set_internal_port_ops(&evt_rsrc->ops);
}
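
/*
 * Tx-mode selection is all-or-nothing: if even one ethdev lacks
 * RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT, every worker uses the
 * generic (Tx queue enqueue) ops so all lcores run identical code.
 */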
static __rte_noinline int
l2fwd_get_free_event_port(struct l2fwd_event_resources *evt_rsrc)
{
	static int index;
	int port_id;

	rte_spinlock_lock(&evt_rsrc->evp.lock);
	if (index >= evt_rsrc->evp.nb_ports) {
		printf("No free event port is available\n");
		rte_spinlock_unlock(&evt_rsrc->evp.lock);
		return -1;
	}
	port_id = evt_rsrc->evp.event_p_id[index];
	index++;
	rte_spinlock_unlock(&evt_rsrc->evp.lock);

	return port_id;
}
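
/*
 * Each worker lcore claims one event port exactly once at startup; the
 * spinlock guards only this allocation and never appears on the fast path.
 */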
static __rte_always_inline void
l2fwd_event_fwd(struct l2fwd_resources *rsrc, struct rte_event *ev,
		const uint8_t tx_q_id, const uint64_t timer_period,
		const uint32_t flags)
{
	struct rte_mbuf *mbuf = ev->mbuf;
	uint16_t dst_port;

	rte_prefetch0(rte_pktmbuf_mtod(mbuf, void *));
	dst_port = rsrc->dst_ports[mbuf->port];

	if (timer_period > 0)
		__atomic_fetch_add(&rsrc->port_stats[mbuf->port].rx,
				1, __ATOMIC_RELAXED);
	mbuf->port = dst_port;

	if (flags & L2FWD_EVENT_UPDT_MAC)
		l2fwd_mac_updating(mbuf, dst_port, &rsrc->eth_addr[dst_port]);

	if (flags & L2FWD_EVENT_TX_ENQ) {
		ev->queue_id = tx_q_id;
		ev->op = RTE_EVENT_OP_FORWARD;
	}

	if (flags & L2FWD_EVENT_TX_DIRECT)
		rte_event_eth_tx_adapter_txq_set(mbuf, 0);

	if (timer_period > 0)
		__atomic_fetch_add(&rsrc->port_stats[mbuf->port].tx,
				1, __ATOMIC_RELAXED);
}
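
/*
 * Two Tx strategies: with L2FWD_EVENT_TX_ENQ the event is forwarded to
 * the last event queue, which the Tx adapter drains; with
 * L2FWD_EVENT_TX_DIRECT the mbuf is stamped with Tx queue 0 and handed
 * straight to the adapter's internal port. Since flags is a literal at
 * every call site, the unused branches disappear at compile time.
 */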
static __rte_always_inline void
l2fwd_event_loop_single(struct l2fwd_resources *rsrc,
			const uint32_t flags)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	const int port_id = l2fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
					evt_rsrc->evq.nb_queues - 1];
	const uint64_t timer_period = rsrc->timer_period;
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	uint8_t enq = 0, deq = 0;
	struct rte_event ev;

	if (port_id < 0)
		return;

	printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
	       rte_lcore_id());

	while (!rsrc->force_quit) {
		/* Read packet from eventdev */
		deq = rte_event_dequeue_burst(event_d_id, port_id, &ev, 1, 0);
		if (!deq)
			continue;

		l2fwd_event_fwd(rsrc, &ev, tx_q_id, timer_period, flags);

		if (flags & L2FWD_EVENT_TX_ENQ) {
			do {
				enq = rte_event_enqueue_burst(event_d_id,
						port_id, &ev, 1);
			} while (!enq && !rsrc->force_quit);
		}

		if (flags & L2FWD_EVENT_TX_DIRECT) {
			do {
				enq = rte_event_eth_tx_adapter_enqueue(
						event_d_id, port_id, &ev, 1, 0);
			} while (!enq && !rsrc->force_quit);
		}
	}

	l2fwd_event_worker_cleanup(event_d_id, port_id, &ev, enq, deq, 0);
}
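
/*
 * The do/while retry loops busy-wait on back-pressure: the event is
 * re-offered until the enqueue succeeds or shutdown is requested, so a
 * full queue never silently drops a packet.
 */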
static __rte_always_inline void
l2fwd_event_loop_burst(struct l2fwd_resources *rsrc,
		       const uint32_t flags)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	const int port_id = l2fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
					evt_rsrc->evq.nb_queues - 1];
	const uint64_t timer_period = rsrc->timer_period;
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint8_t deq_len = evt_rsrc->deq_depth;
	struct rte_event ev[MAX_PKT_BURST];
	uint16_t nb_rx = 0, nb_tx = 0;
	uint8_t i;

	if (port_id < 0)
		return;

	printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
	       rte_lcore_id());

	while (!rsrc->force_quit) {
		/* Read packets from eventdev. 8< */
		nb_rx = rte_event_dequeue_burst(event_d_id, port_id, ev,
						deq_len, 0);
		if (nb_rx == 0)
			continue;

		for (i = 0; i < nb_rx; i++) {
			l2fwd_event_fwd(rsrc, &ev[i], tx_q_id, timer_period,
					flags);
		}
		/* >8 End of reading packets from eventdev. */

		if (flags & L2FWD_EVENT_TX_ENQ) {
			/* Forwarding to destination ports. 8< */
			nb_tx = rte_event_enqueue_burst(event_d_id, port_id,
							ev, nb_rx);
			while (nb_tx < nb_rx && !rsrc->force_quit)
				nb_tx += rte_event_enqueue_burst(event_d_id,
						port_id, ev + nb_tx,
						nb_rx - nb_tx);
			/* >8 End of forwarding to destination ports. */
		}

		if (flags & L2FWD_EVENT_TX_DIRECT) {
			nb_tx = rte_event_eth_tx_adapter_enqueue(event_d_id,
						port_id, ev, nb_rx, 0);
			while (nb_tx < nb_rx && !rsrc->force_quit)
				nb_tx += rte_event_eth_tx_adapter_enqueue(
						event_d_id, port_id,
						ev + nb_tx, nb_rx - nb_tx, 0);
		}
	}

	l2fwd_event_worker_cleanup(event_d_id, port_id, ev, nb_rx, nb_tx, 0);
}
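
/*
 * Burst enqueues may be partial; the catch-up loops resubmit only the
 * unsent tail (ev + nb_tx, nb_rx - nb_tx), so accepted events are never
 * enqueued twice.
 */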
static __rte_always_inline void
l2fwd_event_loop(struct l2fwd_resources *rsrc,
		 const uint32_t flags)
{
	if (flags & L2FWD_EVENT_SINGLE)
		l2fwd_event_loop_single(rsrc, flags);
	if (flags & L2FWD_EVENT_BURST)
		l2fwd_event_loop_burst(rsrc, flags);
}
static void __rte_noinline
l2fwd_event_main_loop_tx_d(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc,
			 L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_brst(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_BURST);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_brst(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_mac(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
			 L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_brst_mac(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
			 L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_BURST);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_mac(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
			 L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_brst_mac(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
			 L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
}
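
/*
 * Each __rte_noinline wrapper pins one flag combination, forcing the
 * compiler to emit a distinct specialization of the always-inline loop;
 * the dispatch table in l2fwd_event_resource_setup() selects one of them
 * at runtime.
 */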
static __rte_always_inline void
l2fwd_event_vector_fwd(struct l2fwd_resources *rsrc,
		       struct rte_event_vector *vec,
		       const uint64_t timer_period, const uint32_t flags)
{
	struct rte_mbuf **mbufs = vec->mbufs;
	uint16_t i, j;

	rte_prefetch0(rte_pktmbuf_mtod(mbufs[0], void *));

	/* If the vector attribute is valid, all mbufs are from the same
	 * port/queue.
	 */
	if (vec->attr_valid) {
		vec->port = rsrc->dst_ports[mbufs[0]->port];
		if (flags & L2FWD_EVENT_TX_DIRECT)
			vec->queue = 0;

		if (timer_period > 0)
			__atomic_fetch_add(&rsrc->port_stats[mbufs[0]->port].rx,
					   vec->nb_elem, __ATOMIC_RELAXED);

		for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
			if (j < vec->nb_elem)
				rte_prefetch0(
					rte_pktmbuf_mtod(mbufs[j], void *));

			if (flags & L2FWD_EVENT_UPDT_MAC)
				l2fwd_mac_updating(mbufs[i], vec->port,
						   &rsrc->eth_addr[vec->port]);
		}

		if (timer_period > 0)
			__atomic_fetch_add(&rsrc->port_stats[vec->port].tx,
					   vec->nb_elem, __ATOMIC_RELAXED);
	} else {
		/* Mixed-origin vector: resolve ports and count per mbuf. */
		for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
			if (timer_period > 0)
				__atomic_fetch_add(
					&rsrc->port_stats[mbufs[i]->port].rx,
					1, __ATOMIC_RELAXED);

			if (j < vec->nb_elem)
				rte_prefetch0(
					rte_pktmbuf_mtod(mbufs[j], void *));

			mbufs[i]->port = rsrc->dst_ports[mbufs[i]->port];

			if (flags & L2FWD_EVENT_UPDT_MAC)
				l2fwd_mac_updating(
					mbufs[i], mbufs[i]->port,
					&rsrc->eth_addr[mbufs[i]->port]);

			if (flags & L2FWD_EVENT_TX_DIRECT)
				rte_event_eth_tx_adapter_txq_set(mbufs[i], 0);

			if (timer_period > 0)
				__atomic_fetch_add(
					&rsrc->port_stats[mbufs[i]->port].tx,
					1, __ATOMIC_RELAXED);
		}
	}
}
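
/*
 * When attr_valid is set the whole vector shares one destination lookup
 * and one pair of stats updates; otherwise everything is done per mbuf.
 * In both paths j stays one ahead of i so the next packet's data is
 * prefetched while the current one is processed.
 */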
static __rte_always_inline void
l2fwd_event_loop_vector(struct l2fwd_resources *rsrc, const uint32_t flags)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	const int port_id = l2fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id =
		evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
	const uint64_t timer_period = rsrc->timer_period;
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint8_t deq_len = evt_rsrc->deq_depth;
	struct rte_event ev[MAX_PKT_BURST];
	uint16_t nb_rx = 0, nb_tx = 0;
	uint8_t i;

	if (port_id < 0)
		return;

	printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
	       rte_lcore_id());

	while (!rsrc->force_quit) {
		nb_rx = rte_event_dequeue_burst(event_d_id, port_id, ev,
						deq_len, 0);
		if (nb_rx == 0)
			continue;

		for (i = 0; i < nb_rx; i++) {
			if (flags & L2FWD_EVENT_TX_ENQ) {
				ev[i].queue_id = tx_q_id;
				ev[i].op = RTE_EVENT_OP_FORWARD;
			}

			l2fwd_event_vector_fwd(rsrc, ev[i].vec, timer_period,
					       flags);
		}

		if (flags & L2FWD_EVENT_TX_ENQ) {
			nb_tx = rte_event_enqueue_burst(event_d_id, port_id,
							ev, nb_rx);
			while (nb_tx < nb_rx && !rsrc->force_quit)
				nb_tx += rte_event_enqueue_burst(
						event_d_id, port_id,
						ev + nb_tx, nb_rx - nb_tx);
		}

		if (flags & L2FWD_EVENT_TX_DIRECT) {
			nb_tx = rte_event_eth_tx_adapter_enqueue(
					event_d_id, port_id, ev, nb_rx, 0);
			while (nb_tx < nb_rx && !rsrc->force_quit)
				nb_tx += rte_event_eth_tx_adapter_enqueue(
						event_d_id, port_id,
						ev + nb_tx, nb_rx - nb_tx, 0);
		}
	}

	l2fwd_event_worker_cleanup(event_d_id, port_id, ev, nb_rx, nb_tx, 1);
}
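
/*
 * The final argument of l2fwd_event_worker_cleanup() is 1 here so that
 * any undrained events are released as event vectors rather than as
 * single mbufs.
 */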
static void __rte_noinline
l2fwd_event_main_loop_tx_d_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_DIRECT);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_brst_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_DIRECT);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_ENQ);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_brst_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_ENQ);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_mac_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc,
				L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_DIRECT);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_brst_mac_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc,
				L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_DIRECT);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_mac_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc,
				L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_ENQ);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_brst_mac_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc,
				L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_ENQ);
}
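
/*
 * The vector loop always consumes bursts, so each *_brst_vec wrapper is
 * intentionally identical to its non-burst twin; both slots still have to
 * exist to fill the four-dimensional dispatch table below.
 */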
void
l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
{
	/* [VECTOR][MAC_UPDT][TX_MODE][BURST] */
	const event_loop_cb event_loop[2][2][2][2] = {
		[0][0][0][0] = l2fwd_event_main_loop_tx_d,
		[0][0][0][1] = l2fwd_event_main_loop_tx_d_brst,
		[0][0][1][0] = l2fwd_event_main_loop_tx_q,
		[0][0][1][1] = l2fwd_event_main_loop_tx_q_brst,
		[0][1][0][0] = l2fwd_event_main_loop_tx_d_mac,
		[0][1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac,
		[0][1][1][0] = l2fwd_event_main_loop_tx_q_mac,
		[0][1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac,
		[1][0][0][0] = l2fwd_event_main_loop_tx_d_vec,
		[1][0][0][1] = l2fwd_event_main_loop_tx_d_brst_vec,
		[1][0][1][0] = l2fwd_event_main_loop_tx_q_vec,
		[1][0][1][1] = l2fwd_event_main_loop_tx_q_brst_vec,
		[1][1][0][0] = l2fwd_event_main_loop_tx_d_mac_vec,
		[1][1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac_vec,
		[1][1][1][0] = l2fwd_event_main_loop_tx_q_mac_vec,
		[1][1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac_vec,
	};
	struct l2fwd_event_resources *evt_rsrc;
	uint32_t event_queue_cfg;
	int ret;

	if (!rte_event_dev_count())
		rte_panic("No Eventdev found\n");

	evt_rsrc = rte_zmalloc("l2fwd_event",
			       sizeof(struct l2fwd_event_resources), 0);
	if (evt_rsrc == NULL)
		rte_panic("Failed to allocate memory\n");

	rsrc->evt_rsrc = evt_rsrc;

	/* Setup eventdev capability callbacks */
	l2fwd_event_capability_setup(evt_rsrc);

	/* Event device configuration */
	event_queue_cfg = evt_rsrc->ops.event_device_setup(rsrc);

	/* Event queue configuration */
	evt_rsrc->ops.event_queue_setup(rsrc, event_queue_cfg);

	/* Event port configuration */
	evt_rsrc->ops.event_port_setup(rsrc);

	/* Rx/Tx adapters configuration */
	evt_rsrc->ops.adapter_setup(rsrc);

	/* Start event device */
	ret = rte_event_dev_start(evt_rsrc->event_d_id);
	if (ret < 0)
		rte_panic("Error in starting eventdev\n");

	evt_rsrc->ops.l2fwd_event_loop =
		event_loop[rsrc->evt_vec.enabled][rsrc->mac_updating]
			  [evt_rsrc->tx_mode_q][evt_rsrc->has_burst];
}
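
/*
 * A worker lcore then just calls the selected specialization. Minimal
 * usage sketch (hypothetical helper, not part of this file):
 *
 *	static int
 *	l2fwd_event_worker(void *args)
 *	{
 *		struct l2fwd_resources *rsrc = args;
 *
 *		rsrc->evt_rsrc->ops.l2fwd_event_loop(rsrc);
 *		return 0;
 *	}
 */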