1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
8 #include <rte_atomic.h>
9 #include <rte_cycles.h>
10 #include <rte_ethdev.h>
11 #include <rte_eventdev.h>
12 #include <rte_event_eth_rx_adapter.h>
13 #include <rte_event_eth_tx_adapter.h>
14 #include <rte_lcore.h>
15 #include <rte_malloc.h>
16 #include <rte_spinlock.h>
18 #include "l2fwd_event.h"
/*
 * Map the service identified by @service_id to the service lcore that is
 * currently running the fewest services, then start that lcore.  Used to
 * spread eventdev/adapter services evenly over the configured service cores.
 */
l2fwd_event_service_enable(uint32_t service_id)
	uint8_t min_service_count = UINT8_MAX;
	uint32_t slcore_array[RTE_MAX_LCORE];
	unsigned int slcore = 0;
	uint8_t service_count;
	/* Nothing to do when no service lcores have been configured. */
	if (!rte_service_lcore_count())
	slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
	/* Get the core which has least number of services running. */
	while (slcore_count--) {
		/* Reset default mapping */
		rte_service_map_lcore_set(service_id,
				slcore_array[slcore_count], 0);
		/* Count services already mapped to this candidate lcore. */
		service_count = rte_service_lcore_count_services(
				slcore_array[slcore_count]);
		/* Track the least-loaded lcore seen so far. */
		if (service_count < min_service_count) {
			slcore = slcore_array[slcore_count];
			min_service_count = service_count;
	/* Bind the service to the chosen lcore; non-zero means failure
	 * (error path not visible in this view). */
	if (rte_service_map_lcore_set(service_id, slcore, 1))
	rte_service_lcore_start(slcore);
55 l2fwd_event_service_setup(struct l2fwd_resources *rsrc)
57 struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
58 struct rte_event_dev_info evdev_info;
59 uint32_t service_id, caps;
62 rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
63 if (evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) {
64 ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
66 if (ret != -ESRCH && ret != 0)
67 rte_panic("Error in starting eventdev service\n");
68 l2fwd_event_service_enable(service_id);
71 for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
72 ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
73 evt_rsrc->rx_adptr.rx_adptr[i], &caps);
75 rte_panic("Failed to get Rx adapter[%d] caps\n",
76 evt_rsrc->rx_adptr.rx_adptr[i]);
77 ret = rte_event_eth_rx_adapter_service_id_get(
80 if (ret != -ESRCH && ret != 0)
81 rte_panic("Error in starting Rx adapter[%d] service\n",
82 evt_rsrc->rx_adptr.rx_adptr[i]);
83 l2fwd_event_service_enable(service_id);
86 for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
87 ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
88 evt_rsrc->tx_adptr.tx_adptr[i], &caps);
90 rte_panic("Failed to get Rx adapter[%d] caps\n",
91 evt_rsrc->tx_adptr.tx_adptr[i]);
92 ret = rte_event_eth_tx_adapter_service_id_get(
95 if (ret != -ESRCH && ret != 0)
96 rte_panic("Error in starting Rx adapter[%d] service\n",
97 evt_rsrc->tx_adptr.tx_adptr[i]);
98 l2fwd_event_service_enable(service_id);
/*
 * Select the event ops vector based on the Tx adapter capabilities of all
 * ethdev ports: if any port lacks RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT,
 * use the generic (event-queue based Tx) ops, otherwise the internal-port ops.
 */
l2fwd_event_capability_setup(struct l2fwd_event_resources *evt_rsrc)
	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_tx_adapter_caps_get(0, i, &caps);
			rte_panic("Invalid capability for Tx adptr port %d\n",
		/* Sticky flag: set once any port needs generic Tx mode. */
		evt_rsrc->tx_mode_q |= !(caps &
				RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
	if (evt_rsrc->tx_mode_q)
		l2fwd_event_set_generic_ops(&evt_rsrc->ops);
		/* else branch (line not visible here): internal-port ops. */
		l2fwd_event_set_internal_port_ops(&evt_rsrc->ops);
/*
 * Top-level eventdev setup entry point: allocate the event resources,
 * pick the ops vector from device capabilities, then run the device,
 * queue, port and Rx/Tx adapter configuration steps in order.
 * Panics (fatal) if no eventdev is present or allocation fails.
 */
l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
	struct l2fwd_event_resources *evt_rsrc;
	uint32_t event_queue_cfg;
	if (!rte_event_dev_count())
		rte_panic("No Eventdev found\n");
	/* Zeroed allocation; freed only at process exit (example app). */
	evt_rsrc = rte_zmalloc("l2fwd_event",
			sizeof(struct l2fwd_event_resources), 0);
	if (evt_rsrc == NULL)
		rte_panic("Failed to allocate memory\n");
	rsrc->evt_rsrc = evt_rsrc;
	/* Setup eventdev capability callbacks */
	l2fwd_event_capability_setup(evt_rsrc);
	/* Event device configuration */
	event_queue_cfg = evt_rsrc->ops.event_device_setup(rsrc);
	/* Event queue configuration */
	evt_rsrc->ops.event_queue_setup(rsrc, event_queue_cfg);
	/* Event port configuration */
	evt_rsrc->ops.event_port_setup(rsrc);
	/* Rx/Tx adapters configuration */
	evt_rsrc->ops.adapter_setup(rsrc);