/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_service.h>
#include <rte_spinlock.h>

#include "l2fwd_event.h"

#define L2FWD_EVENT_SINGLE	0x1
#define L2FWD_EVENT_BURST	0x2
#define L2FWD_EVENT_TX_DIRECT	0x4
#define L2FWD_EVENT_TX_ENQ	0x8
#define L2FWD_EVENT_UPDT_MAC	0x10

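/*
 * The mode flags above are OR'ed together by the wrappers at the bottom of
 * this file to build specialised main loops: SINGLE/BURST pick the dequeue
 * granularity, TX_DIRECT/TX_ENQ pick whether packets are handed straight to
 * the Tx adapter or forwarded to the Tx event queue, and UPDT_MAC enables
 * MAC address rewriting.
 */

/*
 * Map the given service_id to the service lcore currently running the fewest
 * services, then start that lcore. Used for eventdevs and adapters that rely
 * on a software service instead of scheduling internally.
 */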
static inline int
l2fwd_event_service_enable(uint32_t service_id)
{
        uint8_t min_service_count = UINT8_MAX;
        uint32_t slcore_array[RTE_MAX_LCORE];
        unsigned int slcore = 0;
        uint8_t service_count;
        int32_t slcore_count;

        if (!rte_service_lcore_count())
                return -ENOENT;

        slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
        if (slcore_count < 0)
                return -ENOENT;
        /* Get the core which has the least number of services running. */
        while (slcore_count--) {
                /* Reset default mapping */
                if (rte_service_map_lcore_set(service_id,
                                slcore_array[slcore_count], 0) != 0)
                        return -ENOENT;
                service_count = rte_service_lcore_count_services(
                                slcore_array[slcore_count]);
                if (service_count < min_service_count) {
                        slcore = slcore_array[slcore_count];
                        min_service_count = service_count;
                }
        }
        if (rte_service_map_lcore_set(service_id, slcore, 1) != 0)
                return -ENOENT;
        rte_service_lcore_start(slcore);

        return 0;
}

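/*
 * Bind the software services used by the event device and the Rx/Tx adapters
 * to service lcores. -ESRCH from a service-ID query means the component
 * schedules internally and needs no service core.
 */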
void
l2fwd_event_service_setup(struct l2fwd_resources *rsrc)
{
        struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
        struct rte_event_dev_info evdev_info;
        uint32_t service_id, caps;
        int ret, i;

        rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
        if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
                ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
                                &service_id);
                if (ret != -ESRCH && ret != 0)
                        rte_panic("Error in starting eventdev service\n");
                l2fwd_event_service_enable(service_id);
        }

        for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
                ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
                                evt_rsrc->rx_adptr.rx_adptr[i], &caps);
                if (ret < 0)
                        rte_panic("Failed to get Rx adapter[%d] caps\n",
                                        evt_rsrc->rx_adptr.rx_adptr[i]);
                ret = rte_event_eth_rx_adapter_service_id_get(
                                evt_rsrc->rx_adptr.rx_adptr[i],
                                &service_id);
                if (ret != -ESRCH && ret != 0)
                        rte_panic("Error in starting Rx adapter[%d] service\n",
                                        evt_rsrc->rx_adptr.rx_adptr[i]);
                l2fwd_event_service_enable(service_id);
        }

        for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
                ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
                                evt_rsrc->tx_adptr.tx_adptr[i], &caps);
                if (ret < 0)
                        rte_panic("Failed to get Tx adapter[%d] caps\n",
                                        evt_rsrc->tx_adptr.tx_adptr[i]);
                ret = rte_event_eth_tx_adapter_service_id_get(
                                evt_rsrc->tx_adptr.tx_adptr[i],
                                &service_id);
                if (ret != -ESRCH && ret != 0)
                        rte_panic("Error in starting Tx adapter[%d] service\n",
                                        evt_rsrc->tx_adptr.tx_adptr[i]);
                l2fwd_event_service_enable(service_id);
        }
}

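/*
 * Probe the Tx adapter capability of every ethdev port: if any port lacks
 * the INTERNAL_PORT capability the application falls back to the generic
 * (Tx event queue based) ops, otherwise the internal-port ops are used.
 */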
static void
l2fwd_event_capability_setup(struct l2fwd_event_resources *evt_rsrc)
{
        uint32_t caps = 0;
        uint16_t i;
        int ret;

        RTE_ETH_FOREACH_DEV(i) {
                ret = rte_event_eth_tx_adapter_caps_get(0, i, &caps);
                if (ret)
                        rte_panic("Invalid capability for Tx adptr port %d\n",
                                        i);
                evt_rsrc->tx_mode_q |= !(caps &
                                RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
        }

        if (evt_rsrc->tx_mode_q)
                l2fwd_event_set_generic_ops(&evt_rsrc->ops);
        else
                l2fwd_event_set_internal_port_ops(&evt_rsrc->ops);
}

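/*
 * Hand out one event port per worker lcore. The static index is advanced
 * under the spinlock so that concurrent workers never receive the same
 * port; -1 is returned once all configured ports are taken.
 */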
static __rte_noinline int
l2fwd_get_free_event_port(struct l2fwd_event_resources *evt_rsrc)
{
        static int index;
        int port_id;

        rte_spinlock_lock(&evt_rsrc->evp.lock);
        if (index >= evt_rsrc->evp.nb_ports) {
                printf("No free event port is available\n");
                rte_spinlock_unlock(&evt_rsrc->evp.lock);
                return -1;
        }

        port_id = evt_rsrc->evp.event_p_id[index];
        index++;
        rte_spinlock_unlock(&evt_rsrc->evp.lock);

        return port_id;
}

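/*
 * Per-event forwarding work shared by the single and burst loops: look up
 * the destination port, update Rx/Tx statistics when the stats timer is
 * enabled, optionally rewrite the MAC addresses, and prepare the event
 * either for a forward to the Tx event queue (TX_ENQ) or for Tx adapter
 * enqueue (TX_DIRECT). The flags are compile-time constants in each caller,
 * so the compiler can drop the untaken branches in the specialised loops.
 */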
static __rte_always_inline void
l2fwd_event_fwd(struct l2fwd_resources *rsrc, struct rte_event *ev,
                const uint8_t tx_q_id, const uint64_t timer_period,
                const uint32_t flags)
{
        struct rte_mbuf *mbuf = ev->mbuf;
        uint16_t dst_port;

        rte_prefetch0(rte_pktmbuf_mtod(mbuf, void *));
        dst_port = rsrc->dst_ports[mbuf->port];

        if (timer_period > 0)
                __atomic_fetch_add(&rsrc->port_stats[mbuf->port].rx,
                                1, __ATOMIC_RELAXED);
        mbuf->port = dst_port;

        if (flags & L2FWD_EVENT_UPDT_MAC)
                l2fwd_mac_updating(mbuf, dst_port, &rsrc->eth_addr[dst_port]);

        if (flags & L2FWD_EVENT_TX_ENQ) {
                ev->queue_id = tx_q_id;
                ev->op = RTE_EVENT_OP_FORWARD;
        }

        if (flags & L2FWD_EVENT_TX_DIRECT)
                rte_event_eth_tx_adapter_txq_set(mbuf, 0);

        if (timer_period > 0)
                __atomic_fetch_add(&rsrc->port_stats[mbuf->port].tx,
                                1, __ATOMIC_RELAXED);
}

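/*
 * Main loop for the single-event mode: dequeue one event at a time, forward
 * it, then either re-enqueue it towards the Tx event queue or hand it to the
 * Tx adapter, retrying until it is accepted or a quit is requested.
 */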
static __rte_always_inline void
l2fwd_event_loop_single(struct l2fwd_resources *rsrc,
                        const uint32_t flags)
{
        struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
        const int port_id = l2fwd_get_free_event_port(evt_rsrc);
        const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
                                        evt_rsrc->evq.nb_queues - 1];
        const uint64_t timer_period = rsrc->timer_period;
        const uint8_t event_d_id = evt_rsrc->event_d_id;
        struct rte_event ev;

        if (port_id < 0)
                return;

        printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
                rte_lcore_id());

        while (!rsrc->force_quit) {
                /* Read packet from eventdev */
                if (!rte_event_dequeue_burst(event_d_id, port_id, &ev, 1, 0))
                        continue;

                l2fwd_event_fwd(rsrc, &ev, tx_q_id, timer_period, flags);

                if (flags & L2FWD_EVENT_TX_ENQ) {
                        while (!rte_event_enqueue_burst(event_d_id, port_id,
                                                        &ev, 1) &&
                                        !rsrc->force_quit)
                                ;
                }

                if (flags & L2FWD_EVENT_TX_DIRECT) {
                        while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
                                                        port_id, &ev, 1, 0) &&
                                        !rsrc->force_quit)
                                ;
                }
        }
}

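/*
 * Main loop for the burst mode: dequeue up to deq_depth events, forward each
 * of them, then enqueue the whole burst, retrying the unsent tail until
 * everything is accepted or a quit is requested.
 */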
static __rte_always_inline void
l2fwd_event_loop_burst(struct l2fwd_resources *rsrc,
                       const uint32_t flags)
{
        struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
        const int port_id = l2fwd_get_free_event_port(evt_rsrc);
        const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
                                        evt_rsrc->evq.nb_queues - 1];
        const uint64_t timer_period = rsrc->timer_period;
        const uint8_t event_d_id = evt_rsrc->event_d_id;
        const uint8_t deq_len = evt_rsrc->deq_depth;
        struct rte_event ev[MAX_PKT_BURST];
        uint16_t nb_rx, nb_tx;
        uint8_t i;

        if (port_id < 0)
                return;

        printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
                rte_lcore_id());

        while (!rsrc->force_quit) {
                /* Read packets from eventdev */
                nb_rx = rte_event_dequeue_burst(event_d_id, port_id, ev,
                                                deq_len, 0);
                if (nb_rx == 0)
                        continue;

                for (i = 0; i < nb_rx; i++) {
                        l2fwd_event_fwd(rsrc, &ev[i], tx_q_id, timer_period,
                                        flags);
                }

                if (flags & L2FWD_EVENT_TX_ENQ) {
                        nb_tx = rte_event_enqueue_burst(event_d_id, port_id,
                                                        ev, nb_rx);
                        while (nb_tx < nb_rx && !rsrc->force_quit)
                                nb_tx += rte_event_enqueue_burst(event_d_id,
                                                port_id, ev + nb_tx,
                                                nb_rx - nb_tx);
                }

                if (flags & L2FWD_EVENT_TX_DIRECT) {
                        nb_tx = rte_event_eth_tx_adapter_enqueue(event_d_id,
                                                port_id, ev, nb_rx, 0);
                        while (nb_tx < nb_rx && !rsrc->force_quit)
                                nb_tx += rte_event_eth_tx_adapter_enqueue(
                                                event_d_id, port_id,
                                                ev + nb_tx, nb_rx - nb_tx, 0);
                }
        }
}

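/*
 * Dispatch to the single or burst variant. Each wrapper below sets exactly
 * one of the two mode flags, so only one loop runs per worker lcore.
 */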
static __rte_always_inline void
l2fwd_event_loop(struct l2fwd_resources *rsrc,
                 const uint32_t flags)
{
        if (flags & L2FWD_EVENT_SINGLE)
                l2fwd_event_loop_single(rsrc, flags);
        if (flags & L2FWD_EVENT_BURST)
                l2fwd_event_loop_burst(rsrc, flags);
}

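/*
 * One noinline wrapper per supported flag combination. Passing the flags as
 * compile-time constants lets the compiler generate a dedicated copy of the
 * loop for each mode without runtime branching.
 */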
static void __rte_noinline
l2fwd_event_main_loop_tx_d(struct l2fwd_resources *rsrc)
{
        l2fwd_event_loop(rsrc,
                        L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_brst(struct l2fwd_resources *rsrc)
{
        l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_BURST);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q(struct l2fwd_resources *rsrc)
{
        l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_brst(struct l2fwd_resources *rsrc)
{
        l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_mac(struct l2fwd_resources *rsrc)
{
        l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
                        L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_brst_mac(struct l2fwd_resources *rsrc)
{
        l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
                        L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_BURST);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_mac(struct l2fwd_resources *rsrc)
{
        l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
                        L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_brst_mac(struct l2fwd_resources *rsrc)
{
        l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
                        L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
}

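/*
 * Top-level eventdev setup: allocate the event resources, pick the ops
 * (generic vs internal port), configure the event device, queues, ports and
 * Rx/Tx adapters, start the device, and finally select the main loop
 * matching the MAC-update, Tx-mode and burst settings.
 */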
void
l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
{
        /* [MAC_UPDT][TX_MODE][BURST] */
        const event_loop_cb event_loop[2][2][2] = {
                [0][0][0] = l2fwd_event_main_loop_tx_d,
                [0][0][1] = l2fwd_event_main_loop_tx_d_brst,
                [0][1][0] = l2fwd_event_main_loop_tx_q,
                [0][1][1] = l2fwd_event_main_loop_tx_q_brst,
                [1][0][0] = l2fwd_event_main_loop_tx_d_mac,
                [1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac,
                [1][1][0] = l2fwd_event_main_loop_tx_q_mac,
                [1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac,
        };
        struct l2fwd_event_resources *evt_rsrc;
        uint32_t event_queue_cfg;
        int ret;

        if (!rte_event_dev_count())
                rte_panic("No Eventdev found\n");

        evt_rsrc = rte_zmalloc("l2fwd_event",
                        sizeof(struct l2fwd_event_resources), 0);
        if (evt_rsrc == NULL)
                rte_panic("Failed to allocate memory\n");

        rsrc->evt_rsrc = evt_rsrc;

        /* Setup eventdev capability callbacks */
        l2fwd_event_capability_setup(evt_rsrc);

        /* Event device configuration */
        event_queue_cfg = evt_rsrc->ops.event_device_setup(rsrc);

        /* Event queue configuration */
        evt_rsrc->ops.event_queue_setup(rsrc, event_queue_cfg);

        /* Event port configuration */
        evt_rsrc->ops.event_port_setup(rsrc);

        /* Rx/Tx adapters configuration */
        evt_rsrc->ops.adapter_setup(rsrc);

        /* Start event device */
        ret = rte_event_dev_start(evt_rsrc->event_d_id);
        if (ret < 0)
                rte_panic("Error in starting eventdev\n");

        evt_rsrc->ops.l2fwd_event_loop = event_loop
                                        [rsrc->mac_updating]
                                        [evt_rsrc->tx_mode_q]
                                        [evt_rsrc->has_burst];
}