/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>

#include "l3fwd.h"
#include "l3fwd_event.h"
static uint32_t
l3fwd_event_device_setup_generic(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	struct rte_event_dev_config event_d_conf = {
		.nb_events_limit = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128
	};
	struct rte_event_dev_info dev_info;
	const uint8_t event_d_id = 0; /* Always use first event device only */
	uint32_t event_queue_cfg = 0;
	uint16_t ethdev_count = 0;
	uint16_t num_workers = 0;
	uint16_t port_id;
	int32_t ret;

	/* Count the ethdev ports selected by the application's port mask. */
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		ethdev_count++;
	}

	/* Event device configuration */
	rte_event_dev_info_get(event_d_id, &dev_info);
	/* Enable implicit release */
	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
		evt_rsrc->disable_implicit_release = 0;

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;

	/* One queue for each ethdev port + one Tx adapter single-link queue. */
	event_d_conf.nb_event_queues = ethdev_count + 1;
	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
		event_d_conf.nb_event_queues = dev_info.max_event_queues;

	/* Clamp the requested resources to the device limits. */
	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
		event_d_conf.nb_events_limit = dev_info.max_num_events;

	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
		event_d_conf.nb_event_queue_flows =
						dev_info.max_event_queue_flows;

	if (dev_info.max_event_port_dequeue_depth <
				event_d_conf.nb_event_port_dequeue_depth)
		event_d_conf.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;

	if (dev_info.max_event_port_enqueue_depth <
				event_d_conf.nb_event_port_enqueue_depth)
		event_d_conf.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	/* One event port per worker lcore; service lcores run the adapters. */
	num_workers = rte_lcore_count() - rte_service_lcore_count();
	if (dev_info.max_event_ports < num_workers)
		num_workers = dev_info.max_event_ports;

	event_d_conf.nb_event_ports = num_workers;
	evt_rsrc->evp.nb_ports = num_workers;
	evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;

	evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
				    RTE_EVENT_DEV_CAP_BURST_MODE);

	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
	if (ret < 0)
		rte_panic("Error in configuring event device\n");

	evt_rsrc->event_d_id = event_d_id;
	return event_queue_cfg;
}
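
/*
 * Illustrative sketch only (not part of this file's setup path): a worker
 * lcore would dequeue from its event port according to evt_rsrc->has_burst,
 * which the function above derives from RTE_EVENT_DEV_CAP_BURST_MODE.
 * `event_p_id` stands in for the worker's own port.
 *
 *	struct rte_event ev[32];
 *	uint16_t nb_deq;
 *
 *	if (evt_rsrc->has_burst)
 *		nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id,
 *						 ev, RTE_DIM(ev), 0);
 *	else
 *		nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id,
 *						 ev, 1, 0);
 */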
static void
l3fwd_event_port_setup_generic(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_port_conf event_p_conf = {
		.dequeue_depth = 32,
		.enqueue_depth = 32,
		.new_event_threshold = 4096
	};
	struct rte_event_port_conf def_p_conf;
	uint8_t event_p_id;
	int32_t ret;

	evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evp.nb_ports);
	if (!evt_rsrc->evp.event_p_id)
		rte_panic("Failed to allocate memory for event port ids\n");

	memset(&def_p_conf, 0, sizeof(struct rte_event_port_conf));
	ret = rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
	if (ret < 0)
		rte_panic("Error getting default event port configuration\n");

	/* Clamp the requested port configuration to the device defaults. */
	if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
		event_p_conf.new_event_threshold =
						def_p_conf.new_event_threshold;

	if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
		event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;

	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;

	event_p_conf.event_port_cfg = 0;
	if (evt_rsrc->disable_implicit_release)
		event_p_conf.event_port_cfg |=
			RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;

	evt_rsrc->deq_depth = def_p_conf.dequeue_depth;

	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
								event_p_id++) {
		ret = rte_event_port_setup(event_d_id, event_p_id,
					   &event_p_conf);
		if (ret < 0)
			rte_panic("Error in configuring event port %d\n",
				  event_p_id);

		/* Link each worker port to every queue except the last one,
		 * the single-link queue reserved for the Tx adapter.
		 */
		ret = rte_event_port_link(event_d_id, event_p_id,
					  evt_rsrc->evq.event_q_id,
					  NULL,
					  evt_rsrc->evq.nb_queues - 1);
		if (ret != (evt_rsrc->evq.nb_queues - 1))
			rte_panic("Error in linking event port %d to queues\n",
				  event_p_id);
		evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
	}

	rte_spinlock_init(&evt_rsrc->evp.lock);

	/* Save the port config; the Rx/Tx adapter setup below reuses it. */
	evt_rsrc->def_p_conf = event_p_conf;
}
static void
l3fwd_event_queue_setup_generic(uint32_t event_queue_cfg)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_queue_conf event_q_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
		.event_queue_cfg = event_queue_cfg,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
	struct rte_event_queue_conf def_q_conf;
	uint8_t event_q_id;
	int32_t ret;

	event_q_conf.schedule_type = evt_rsrc->sched_type;
	evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evq.nb_queues);
	if (!evt_rsrc->evq.event_q_id)
		rte_panic("Memory allocation failure\n");

	ret = rte_event_queue_default_conf_get(event_d_id, 0, &def_q_conf);
	if (ret < 0)
		rte_panic("Error getting default event queue configuration\n");

	if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
		event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;

	/* Set up one queue per ethdev port for worker traffic. */
	for (event_q_id = 0; event_q_id < (evt_rsrc->evq.nb_queues - 1);
								event_q_id++) {
		ret = rte_event_queue_setup(event_d_id, event_q_id,
					    &event_q_conf);
		if (ret < 0)
			rte_panic("Error in configuring event queue\n");
		evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
	}

	/* The last queue is a high-priority single-link queue feeding the
	 * Tx adapter's event port.
	 */
	event_q_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
	event_q_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	ret = rte_event_queue_setup(event_d_id, event_q_id, &event_q_conf);
	if (ret < 0)
		rte_panic("Error in configuring event queue for Tx adapter\n");
	evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
}
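
/*
 * Resulting queue layout (illustrative):
 *
 *	event_q_id[0] .. event_q_id[nb_queues - 2]: fed by the Rx adapter,
 *		dequeued by the worker event ports.
 *	event_q_id[nb_queues - 1]: single-link queue drained only by the
 *		Tx adapter's event port.
 */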
static void
l3fwd_rx_tx_adapter_setup_generic(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	uint8_t rx_adptr_id = 0;
	uint8_t tx_adptr_id = 0;
	uint8_t tx_port_id = 0;
	uint16_t port_id;
	uint32_t service_id;
	int32_t ret, i = 0;

	memset(&eth_q_conf, 0, sizeof(eth_q_conf));
	eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	/* Rx adapter setup */
	evt_rsrc->rx_adptr.nb_rx_adptr = 1;
	evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->rx_adptr.nb_rx_adptr);
	if (!evt_rsrc->rx_adptr.rx_adptr) {
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Rx adapter\n");
	}

	ret = rte_event_eth_rx_adapter_create(rx_adptr_id, event_d_id,
					      &evt_rsrc->def_p_conf);
	if (ret)
		rte_panic("Failed to create Rx adapter\n");

	/* Configure user requested sched type */
	eth_q_conf.ev.sched_type = evt_rsrc->sched_type;
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[i];
		/* Add all Rx queues of this port (-1) to the adapter. */
		ret = rte_event_eth_rx_adapter_queue_add(rx_adptr_id, port_id,
							 -1, &eth_q_conf);
		if (ret)
			rte_panic("Failed to add queues to Rx adapter\n");
		if (i < evt_rsrc->evq.nb_queues)
			i++;
	}

	ret = rte_event_eth_rx_adapter_service_id_get(rx_adptr_id,
						      &service_id);
	if (ret != -ESRCH && ret != 0)
		rte_panic("Error getting the service ID for Rx adapter\n");

	rte_service_runstate_set(service_id, 1);
	rte_service_set_runstate_mapped_check(service_id, 0);
	evt_rsrc->rx_adptr.service_id = service_id;

	ret = rte_event_eth_rx_adapter_start(rx_adptr_id);
	if (ret)
		rte_panic("Rx adapter[%d] start failed\n", rx_adptr_id);

	evt_rsrc->rx_adptr.rx_adptr[0] = rx_adptr_id;
	/* Tx adapter setup */
	evt_rsrc->tx_adptr.nb_tx_adptr = 1;
	evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->tx_adptr.nb_tx_adptr);
	if (!evt_rsrc->tx_adptr.tx_adptr) {
		free(evt_rsrc->rx_adptr.rx_adptr);
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Tx adapter\n");
	}

	ret = rte_event_eth_tx_adapter_create(tx_adptr_id, event_d_id,
					      &evt_rsrc->def_p_conf);
	if (ret)
		rte_panic("Failed to create Tx adapter\n");

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		/* Add all Tx queues of this port (-1) to the adapter. */
		ret = rte_event_eth_tx_adapter_queue_add(tx_adptr_id, port_id,
							 -1);
		if (ret)
			rte_panic("Failed to add queues to Tx adapter\n");
	}

	ret = rte_event_eth_tx_adapter_service_id_get(tx_adptr_id,
						      &service_id);
	if (ret != -ESRCH && ret != 0)
		rte_panic("Failed to get Tx adapter service ID\n");

	rte_service_runstate_set(service_id, 1);
	rte_service_set_runstate_mapped_check(service_id, 0);
	evt_rsrc->tx_adptr.service_id = service_id;

	ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id,
						      &tx_port_id);
	if (ret)
		rte_panic("Failed to get Tx adapter port id: %d\n", ret);

	/* Link the Tx adapter's event port to the single-link Tx queue. */
	ret = rte_event_port_link(event_d_id, tx_port_id,
				  &evt_rsrc->evq.event_q_id[
						evt_rsrc->evq.nb_queues - 1],
				  NULL, 1);
	if (ret != 1)
		rte_panic("Unable to link Tx adapter port to Tx queue: err=%d\n",
			  ret);

	ret = rte_event_eth_tx_adapter_start(tx_adptr_id);
	if (ret)
		rte_panic("Tx adapter[%d] start failed\n", tx_adptr_id);

	evt_rsrc->tx_adptr.tx_adptr[0] = tx_adptr_id;
}
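
/*
 * Illustrative sketch only: in this generic (service-core) model a worker
 * sends a processed packet to the wire by forwarding its event to the
 * single-link Tx queue linked above; the Tx adapter's event port drains it
 * and performs the ethdev Tx burst. A minimal hand-off, assuming `ev`
 * holds a valid mbuf and `event_p_id` is the worker's port:
 *
 *	rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
 *	ev.queue_id = evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
 *	ev.op = RTE_EVENT_OP_FORWARD;
 *	while (rte_event_enqueue_burst(event_d_id, event_p_id, &ev, 1) != 1)
 *		;
 */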
void
l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops)
{
	ops->event_device_setup = l3fwd_event_device_setup_generic;
	ops->event_queue_setup = l3fwd_event_queue_setup_generic;
	ops->event_port_setup = l3fwd_event_port_setup_generic;
	ops->adapter_setup = l3fwd_rx_tx_adapter_setup_generic;
}
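
/*
 * Usage sketch (hedged; the actual selection logic lives in l3fwd_event.c):
 * the generic ops are the fallback when the Tx adapter lacks an internal
 * event port, i.e. when a service core must move packets, e.g.:
 *
 *	uint32_t caps = 0;
 *
 *	rte_event_eth_tx_adapter_caps_get(event_d_id, port_id, &caps);
 *	if (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)
 *		l3fwd_event_set_internal_port_ops(&evt_rsrc->ops);
 *	else
 *		l3fwd_event_set_generic_ops(&evt_rsrc->ops);
 */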