/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>

#include "l3fwd.h"
#include "l3fwd_event.h"
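
/*
 * "Internal port" mode: the event device can move packets to and from the
 * ethdev queues on its own (RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT),
 * so the Rx/Tx adapters configured here need no service cores.
 */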

static uint32_t
l3fwd_event_device_setup_internal_port(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	struct rte_event_dev_config event_d_conf = {
		.nb_events_limit = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128
	};
	struct rte_event_dev_info dev_info;
	const uint8_t event_d_id = 0; /* Always use first event device only */
	uint32_t event_queue_cfg = 0;
	uint16_t ethdev_count = 0;
	uint16_t num_workers = 0;
	uint16_t port_id;
	int32_t ret;

	/* Count the ethdevs enabled in the port mask */
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		ethdev_count++;
	}

	/* Event device configuration */
	rte_event_dev_info_get(event_d_id, &dev_info);

	/* Enable implicit release */
	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
		evt_rsrc->disable_implicit_release = 0;

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
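
	/* Clamp the requested configuration to the reported device limits */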
	event_d_conf.nb_event_queues = ethdev_count;
	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
		event_d_conf.nb_event_queues = dev_info.max_event_queues;

	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
		event_d_conf.nb_events_limit = dev_info.max_num_events;

	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
		event_d_conf.nb_event_queue_flows =
					dev_info.max_event_queue_flows;

	if (dev_info.max_event_port_dequeue_depth <
				event_d_conf.nb_event_port_dequeue_depth)
		event_d_conf.nb_event_port_dequeue_depth =
					dev_info.max_event_port_dequeue_depth;

	if (dev_info.max_event_port_enqueue_depth <
				event_d_conf.nb_event_port_enqueue_depth)
		event_d_conf.nb_event_port_enqueue_depth =
					dev_info.max_event_port_enqueue_depth;
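
	/* Use one event port per lcore, capped at the device limit */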
	num_workers = rte_lcore_count();
	if (dev_info.max_event_ports < num_workers)
		num_workers = dev_info.max_event_ports;

	event_d_conf.nb_event_ports = num_workers;
	evt_rsrc->evp.nb_ports = num_workers;
	evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
	evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
				 RTE_EVENT_DEV_CAP_BURST_MODE);

	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
	if (ret < 0)
		rte_panic("Error in configuring event device\n");

	evt_rsrc->event_d_id = event_d_id;
	return event_queue_cfg;
}

static void
l3fwd_event_port_setup_internal_port(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_port_conf event_p_conf = {
		.dequeue_depth = 32,
		.enqueue_depth = 32,
		.new_event_threshold = 4096
	};
	struct rte_event_port_conf def_p_conf;
	uint8_t event_p_id;
	int32_t ret;

	evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evp.nb_ports);
	if (!evt_rsrc->evp.event_p_id)
		rte_panic("Failed to allocate memory for Event Ports\n");

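	/*
	 * Start from the defaults reported by the PMD and scale the
	 * preferred values down wherever the default is smaller.
	 */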
	rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
	if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
		event_p_conf.new_event_threshold =
					def_p_conf.new_event_threshold;

	if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
		event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;

	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;

	event_p_conf.disable_implicit_release =
		evt_rsrc->disable_implicit_release;

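	/*
	 * Set up each event port and link it to the event queues; passing a
	 * NULL queue list to rte_event_port_link() links the port to all
	 * queues.
	 */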
	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
							event_p_id++) {
		ret = rte_event_port_setup(event_d_id, event_p_id,
					   &event_p_conf);
		if (ret < 0)
			rte_panic("Error in configuring event port %d\n",
				  event_p_id);

		ret = rte_event_port_link(event_d_id, event_p_id, NULL,
					  NULL, 0);
		if (ret < 0)
			rte_panic("Error in linking event port %d to queue\n",
				  event_p_id);
		evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
	}

	rte_spinlock_init(&evt_rsrc->evp.lock);

	evt_rsrc->def_p_conf = event_p_conf;
}

static void
l3fwd_event_queue_setup_internal_port(uint32_t event_queue_cfg)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_queue_conf event_q_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
		.event_queue_cfg = event_queue_cfg,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
	struct rte_event_queue_conf def_q_conf;
	uint8_t event_q_id = 0;
	int32_t ret;

	rte_event_queue_default_conf_get(event_d_id, event_q_id, &def_q_conf);

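	/* Clamp flow and ordering resources to the device defaults */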
	if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
		event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;

	if (def_q_conf.nb_atomic_order_sequences <
				event_q_conf.nb_atomic_order_sequences)
		event_q_conf.nb_atomic_order_sequences =
				def_q_conf.nb_atomic_order_sequences;

	event_q_conf.event_queue_cfg = event_queue_cfg;
	event_q_conf.schedule_type = evt_rsrc->sched_type;
	evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evq.nb_queues);
	if (!evt_rsrc->evq.event_q_id)
		rte_panic("Memory allocation failure\n");

	for (event_q_id = 0; event_q_id < evt_rsrc->evq.nb_queues;
							event_q_id++) {
		ret = rte_event_queue_setup(event_d_id, event_q_id,
					    &event_q_conf);
		if (ret < 0)
			rte_panic("Error in configuring event queue\n");
		evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
	}
}

static void
l3fwd_rx_tx_adapter_setup_internal_port(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	uint16_t adapter_id = 0;
	uint16_t nb_adapter = 0;
	uint16_t port_id;
	uint8_t q_id = 0;
	int ret;

	memset(&eth_q_conf, 0, sizeof(eth_q_conf));
	eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

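	/* One Rx and one Tx adapter per ethdev enabled in the port mask */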
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		nb_adapter++;
	}

	evt_rsrc->rx_adptr.nb_rx_adptr = nb_adapter;
	evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->rx_adptr.nb_rx_adptr);
	if (!evt_rsrc->rx_adptr.rx_adptr) {
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Rx adapter\n");
	}

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		ret = rte_event_eth_rx_adapter_create(adapter_id, event_d_id,
						      &evt_rsrc->def_p_conf);
		if (ret)
			rte_panic("Failed to create Rx adapter[%d]\n",
				  adapter_id);

		/* Configure user requested sched type */
		eth_q_conf.ev.sched_type = evt_rsrc->sched_type;
		eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[q_id];
		ret = rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
							 -1, &eth_q_conf);
		if (ret)
			rte_panic("Failed to add queues to Rx adapter\n");

		ret = rte_event_eth_rx_adapter_start(adapter_id);
		if (ret)
			rte_panic("Rx adapter[%d] start failed\n",
				  adapter_id);

		evt_rsrc->rx_adptr.rx_adptr[adapter_id] = adapter_id;
		adapter_id++;
		if (q_id < evt_rsrc->evq.nb_queues)
			q_id++;
	}

	evt_rsrc->tx_adptr.nb_tx_adptr = nb_adapter;
	evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->tx_adptr.nb_tx_adptr);
	if (!evt_rsrc->tx_adptr.tx_adptr) {
		free(evt_rsrc->rx_adptr.rx_adptr);
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Tx adapter\n");
	}

	adapter_id = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		ret = rte_event_eth_tx_adapter_create(adapter_id, event_d_id,
						      &evt_rsrc->def_p_conf);
		if (ret)
			rte_panic("Failed to create Tx adapter[%d]\n",
				  adapter_id);

		ret = rte_event_eth_tx_adapter_queue_add(adapter_id, port_id,
							 -1);
		if (ret)
			rte_panic("Failed to add queues to Tx adapter\n");

		ret = rte_event_eth_tx_adapter_start(adapter_id);
		if (ret)
			rte_panic("Tx adapter[%d] start failed\n",
				  adapter_id);

		evt_rsrc->tx_adptr.tx_adptr[adapter_id] = adapter_id;
		adapter_id++;
	}
}
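
/*
 * Populate the setup ops table with the internal-port variants; the caller
 * picks these over the generic ones when every ethdev reports the
 * internal-port adapter capability.
 */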
void
l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops)
{
	ops->event_device_setup = l3fwd_event_device_setup_internal_port;
	ops->event_queue_setup = l3fwd_event_queue_setup_internal_port;
	ops->event_port_setup = l3fwd_event_port_setup_internal_port;
	ops->adapter_setup = l3fwd_rx_tx_adapter_setup_internal_port;
}
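
/*
 * Rough call order, as a sketch (the actual driver is l3fwd_event.c, which
 * also starts the event device afterwards):
 *
 *	struct l3fwd_event_setup_ops ops;
 *	uint32_t event_queue_cfg;
 *
 *	l3fwd_event_set_internal_port_ops(&ops);
 *	event_queue_cfg = ops.event_device_setup();
 *	ops.event_queue_setup(event_queue_cfg);
 *	ops.event_port_setup();
 *	ops.adapter_setup();
 */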