/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdlib.h>
#include <string.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>

#include "l2fwd_common.h"
#include "l2fwd_event.h"
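
/*
 * Generic-mode event resources: one event queue per enabled ethdev port plus
 * a single-link Tx queue. The Rx adapter injects packets into the per-port
 * queues, workers forward them, and the Tx adapter drains the final queue.
 */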

static uint32_t
l2fwd_event_device_setup_generic(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	/* Configures event device as per below configuration. 8< */
	struct rte_event_dev_config event_d_conf = {
		.nb_events_limit = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128
	};
	/* >8 End of configuration event device as per below configuration. */
	struct rte_event_dev_info dev_info;
	const uint8_t event_d_id = 0; /* Always use first event device only */
	uint32_t event_queue_cfg = 0;
	uint16_t ethdev_count = 0;
	uint16_t num_workers = 0;
	uint16_t port_id;
	int ret;

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		ethdev_count++;
	}

	/* Event device configuration */
	rte_event_dev_info_get(event_d_id, &dev_info);

	/* Enable implicit release */
	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
		evt_rsrc->disable_implicit_release = 0;
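
	/* A device with QUEUE_ALL_TYPES can schedule atomic, ordered and
	 * parallel events on the same queue, so queues need not be dedicated
	 * to a single schedule type.
	 */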
	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;

	/* One queue for each ethdev port + one Tx adapter single-link queue. */
	event_d_conf.nb_event_queues = ethdev_count + 1;
	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
		event_d_conf.nb_event_queues = dev_info.max_event_queues;

	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
		event_d_conf.nb_events_limit = dev_info.max_num_events;

	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
		event_d_conf.nb_event_queue_flows =
						dev_info.max_event_queue_flows;

	if (dev_info.max_event_port_dequeue_depth <
				event_d_conf.nb_event_port_dequeue_depth)
		event_d_conf.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;

	if (dev_info.max_event_port_enqueue_depth <
				event_d_conf.nb_event_port_enqueue_depth)
		event_d_conf.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	/* Ignore Main core and service cores. */
	num_workers = rte_lcore_count() - 1 - rte_service_lcore_count();
	if (dev_info.max_event_ports < num_workers)
		num_workers = dev_info.max_event_ports;

	event_d_conf.nb_event_ports = num_workers;
	evt_rsrc->evp.nb_ports = num_workers;
	evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
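
	/* Workers select burst or single-event enqueue/dequeue paths based on
	 * this capability flag.
	 */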
	evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
				RTE_EVENT_DEV_CAP_BURST_MODE);

	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
	if (ret < 0)
		rte_panic("Error in configuring event device\n");

	evt_rsrc->event_d_id = event_d_id;
	return event_queue_cfg;
}

static void
l2fwd_event_port_setup_generic(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	/* Event port initialization. 8< */
	struct rte_event_port_conf event_p_conf = {
		.dequeue_depth = 32,
		.enqueue_depth = 32,
		.new_event_threshold = 4096
	};
	struct rte_event_port_conf def_p_conf;
	uint8_t event_p_id;
	int32_t ret;

	evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evp.nb_ports);
	if (!evt_rsrc->evp.event_p_id)
		rte_panic("Failed to allocate memory for event ports\n");

	memset(&def_p_conf, 0, sizeof(struct rte_event_port_conf));
	ret = rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
	if (ret < 0)
		rte_panic("Failed to get default configuration of event port\n");

	if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
		event_p_conf.new_event_threshold =
			def_p_conf.new_event_threshold;

	if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
		event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;

	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;

	event_p_conf.event_port_cfg = 0;
	if (evt_rsrc->disable_implicit_release)
		event_p_conf.event_port_cfg |=
			RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;

	evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
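
	/* Each worker port is linked to every forwarding queue; the final
	 * single-link queue is reserved for the Tx adapter's event port.
	 */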
	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
								event_p_id++) {
		ret = rte_event_port_setup(event_d_id, event_p_id,
					   &event_p_conf);
		if (ret < 0)
			rte_panic("Error in configuring event port %d\n",
				  event_p_id);

		ret = rte_event_port_link(event_d_id, event_p_id,
					  evt_rsrc->evq.event_q_id,
					  NULL,
					  evt_rsrc->evq.nb_queues - 1);
		if (ret != (evt_rsrc->evq.nb_queues - 1))
			rte_panic("Error in linking event port %d to queues\n",
				  event_p_id);
		evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
	}
	/* >8 End of event port initialization. */

	rte_spinlock_init(&evt_rsrc->evp.lock);

	evt_rsrc->def_p_conf = event_p_conf;
}

static void
l2fwd_event_queue_setup_generic(struct l2fwd_resources *rsrc,
				uint32_t event_queue_cfg)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	/* Event queue initialization. 8< */
	struct rte_event_queue_conf event_q_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
		.event_queue_cfg = event_queue_cfg,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
	struct rte_event_queue_conf def_q_conf;
	uint8_t event_q_id;
	int32_t ret;

	event_q_conf.schedule_type = rsrc->sched_type;
	evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evq.nb_queues);
	if (!evt_rsrc->evq.event_q_id)
		rte_panic("Memory allocation failure\n");

	ret = rte_event_queue_default_conf_get(event_d_id, 0, &def_q_conf);
	if (ret < 0)
		rte_panic("Failed to get default config of event queue\n");
	/* >8 End of event queue initialization. */

	if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
		event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;

	for (event_q_id = 0; event_q_id < (evt_rsrc->evq.nb_queues - 1);
								event_q_id++) {
		ret = rte_event_queue_setup(event_d_id, event_q_id,
					    &event_q_conf);
		if (ret < 0)
			rte_panic("Error in configuring event queue\n");
		evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
	}
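
	/* The last queue is single-link: only the Tx adapter's event port
	 * dequeues from it, so give it the highest priority.
	 */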
	event_q_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
	event_q_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	ret = rte_event_queue_setup(event_d_id, event_q_id, &event_q_conf);
	if (ret < 0)
		rte_panic("Error in configuring event queue for Tx adapter\n");
	evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
}

static void
l2fwd_rx_tx_adapter_setup_generic(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	uint8_t rx_adptr_id = 0;
	uint8_t tx_adptr_id = 0;
	uint8_t tx_port_id = 0;
	uint16_t port_id;
	uint32_t service_id;
	int32_t ret, i = 0;

	memset(&eth_q_conf, 0, sizeof(eth_q_conf));
	eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	/* Rx adapter setup */
	evt_rsrc->rx_adptr.nb_rx_adptr = 1;
	evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->rx_adptr.nb_rx_adptr);
	if (!evt_rsrc->rx_adptr.rx_adptr) {
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Rx adapter\n");
	}

	ret = rte_event_eth_rx_adapter_create(rx_adptr_id, event_d_id,
					      &evt_rsrc->def_p_conf);
	if (ret)
		rte_panic("Failed to create Rx adapter\n");
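
	/* Each enabled port feeds its own event queue; an rx_queue_id of -1
	 * adds all Rx queues of that port to the adapter.
	 */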

	/* Configure user requested sched type */
	eth_q_conf.ev.sched_type = rsrc->sched_type;
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[i];
		ret = rte_event_eth_rx_adapter_queue_add(rx_adptr_id, port_id,
							 -1, &eth_q_conf);
		if (ret)
			rte_panic("Failed to add queues to Rx adapter\n");
		if (i < evt_rsrc->evq.nb_queues)
			i++;
	}
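
	/* Adapters without the internal-port capability are run as a
	 * service; -ESRCH from service_id_get means no service is needed.
	 */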
	ret = rte_event_eth_rx_adapter_service_id_get(rx_adptr_id, &service_id);
	if (ret != -ESRCH && ret != 0)
		rte_panic("Error getting the service ID for Rx adapter\n");

	rte_service_runstate_set(service_id, 1);
	rte_service_set_runstate_mapped_check(service_id, 0);
	evt_rsrc->rx_adptr.service_id = service_id;

	ret = rte_event_eth_rx_adapter_start(rx_adptr_id);
	if (ret)
		rte_panic("Rx adapter[%d] start failed\n", rx_adptr_id);

	evt_rsrc->rx_adptr.rx_adptr[0] = rx_adptr_id;

	/* Tx adapter setup */
	evt_rsrc->tx_adptr.nb_tx_adptr = 1;
	evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->tx_adptr.nb_tx_adptr);
	if (!evt_rsrc->tx_adptr.tx_adptr) {
		free(evt_rsrc->rx_adptr.rx_adptr);
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Tx adapter\n");
	}

	ret = rte_event_eth_tx_adapter_create(tx_adptr_id, event_d_id,
					      &evt_rsrc->def_p_conf);
	if (ret)
		rte_panic("Failed to create Tx adapter\n");

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		ret = rte_event_eth_tx_adapter_queue_add(tx_adptr_id, port_id,
							 -1);
		if (ret)
			rte_panic("Failed to add queues to Tx adapter\n");
	}

	ret = rte_event_eth_tx_adapter_service_id_get(tx_adptr_id, &service_id);
	if (ret != -ESRCH && ret != 0)
		rte_panic("Failed to get Tx adapter service ID\n");

	rte_service_runstate_set(service_id, 1);
	rte_service_set_runstate_mapped_check(service_id, 0);
	evt_rsrc->tx_adptr.service_id = service_id;
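
	/* The Tx adapter allocates its own event port; link it to the
	 * single-link Tx queue so worker-forwarded events reach the adapter.
	 */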
	/* Extra port created. 8< */
	ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id, &tx_port_id);
	if (ret)
		rte_panic("Failed to get Tx adapter port id: %d\n", ret);

	ret = rte_event_port_link(event_d_id, tx_port_id,
				  &evt_rsrc->evq.event_q_id[
					evt_rsrc->evq.nb_queues - 1],
				  NULL, 1);
	if (ret != 1)
		rte_panic("Unable to link Tx adapter port to Tx queue: err=%d\n",
			  ret);
	/* >8 End of extra port created. */

	ret = rte_event_eth_tx_adapter_start(tx_adptr_id);
	if (ret)
		rte_panic("Tx adapter[%d] start failed\n", tx_adptr_id);

	evt_rsrc->tx_adptr.tx_adptr[0] = tx_adptr_id;
}

void
l2fwd_event_set_generic_ops(struct event_setup_ops *ops)
{
	ops->event_device_setup = l2fwd_event_device_setup_generic;
	ops->event_queue_setup = l2fwd_event_queue_setup_generic;
	ops->event_port_setup = l2fwd_event_port_setup_generic;
	ops->adapter_setup = l2fwd_rx_tx_adapter_setup_generic;
}
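
/*
 * These callbacks are invoked in dependency order (device, then queues, then
 * ports, then adapters) by the event resource setup path in l2fwd_event.c.
 */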