/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>

#include "l2fwd_common.h"
#include "l2fwd_event.h"
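
/*
 * Configure event device 0: one event queue per enabled ethdev and one
 * event port per worker lcore, with every requested value clamped to the
 * limits reported by rte_event_dev_info_get(). Returns the event queue
 * config flags to be used during queue setup.
 */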
static uint32_t
l2fwd_event_device_setup_internal_port(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	struct rte_event_dev_config event_d_conf = {
		.nb_events_limit = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128
	};
	struct rte_event_dev_info dev_info;
	const uint8_t event_d_id = 0; /* Always use the first event device */
	uint32_t event_queue_cfg = 0;
	uint16_t ethdev_count = 0;
	uint16_t num_workers = 0;
	uint16_t port_id;
	int ret;

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		ethdev_count++;
	}

	/* Event device configuration */
	rte_event_dev_info_get(event_d_id, &dev_info);

	/* Enable implicit release */
	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
		evt_rsrc->disable_implicit_release = 0;

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
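
	/* Clamp the requested configuration to the device-reported limits. */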
	event_d_conf.nb_event_queues = ethdev_count;
	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
		event_d_conf.nb_event_queues = dev_info.max_event_queues;

	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
		event_d_conf.nb_events_limit = dev_info.max_num_events;

	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
		event_d_conf.nb_event_queue_flows =
						dev_info.max_event_queue_flows;

	if (dev_info.max_event_port_dequeue_depth <
				event_d_conf.nb_event_port_dequeue_depth)
		event_d_conf.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;

	if (dev_info.max_event_port_enqueue_depth <
				event_d_conf.nb_event_port_enqueue_depth)
		event_d_conf.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	/* Ignore Main core. */
	num_workers = rte_lcore_count() - 1;
	if (dev_info.max_event_ports < num_workers)
		num_workers = dev_info.max_event_ports;

	event_d_conf.nb_event_ports = num_workers;
	evt_rsrc->evp.nb_ports = num_workers;
	evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
	evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
				RTE_EVENT_DEV_CAP_BURST_MODE);

	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
	if (ret < 0)
		rte_panic("Error in configuring event device\n");

	evt_rsrc->event_d_id = event_d_id;
	return event_queue_cfg;
}
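
/*
 * Set up one event port per worker lcore and link each port to all event
 * queues. The final port configuration is saved in def_p_conf so that the
 * Rx/Tx adapters can later be created with the same settings.
 */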
static void
l2fwd_event_port_setup_internal_port(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_port_conf event_p_conf = {
		.dequeue_depth = 32,
		.enqueue_depth = 32,
		.new_event_threshold = 4096
	};
	struct rte_event_port_conf def_p_conf;
	uint8_t event_p_id;
	int32_t ret;

	evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evp.nb_ports);
	if (!evt_rsrc->evp.event_p_id)
		rte_panic("Failed to allocate memory for Event Ports\n");

	ret = rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
	if (ret < 0)
		rte_panic("Failed to get default configuration of event port\n");

	if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
		event_p_conf.new_event_threshold =
						def_p_conf.new_event_threshold;

	if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
		event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;

	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;

	event_p_conf.event_port_cfg = 0;
	if (evt_rsrc->disable_implicit_release)
		event_p_conf.event_port_cfg |=
			RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;

	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
								event_p_id++) {
		ret = rte_event_port_setup(event_d_id, event_p_id,
					   &event_p_conf);
		if (ret < 0)
			rte_panic("Error in configuring event port %d\n",
				  event_p_id);

		/* NULL, NULL, 0 links this port to all event queues */
		ret = rte_event_port_link(event_d_id, event_p_id, NULL,
					  NULL, 0);
		if (ret < 0)
			rte_panic("Error in linking event port %d to queue\n",
				  event_p_id);
		evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;

		/* init spinlock */
		rte_spinlock_init(&evt_rsrc->evp.lock);
	}

	evt_rsrc->def_p_conf = event_p_conf;
}
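
/*
 * Set up one event queue per enabled ethdev with the user-requested
 * scheduling type (rsrc->sched_type), clamping the flow/order defaults
 * against the device-reported queue limits.
 */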
static void
l2fwd_event_queue_setup_internal_port(struct l2fwd_resources *rsrc,
				uint32_t event_queue_cfg)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_queue_conf event_q_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
		.event_queue_cfg = event_queue_cfg,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
	struct rte_event_queue_conf def_q_conf;
	uint8_t event_q_id = 0;
	int32_t ret;

	ret = rte_event_queue_default_conf_get(event_d_id, event_q_id,
					       &def_q_conf);
	if (ret < 0)
		rte_panic("Failed to get default config of event queue\n");

	if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
		event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;

	if (def_q_conf.nb_atomic_order_sequences <
					event_q_conf.nb_atomic_order_sequences)
		event_q_conf.nb_atomic_order_sequences =
					def_q_conf.nb_atomic_order_sequences;

	event_q_conf.event_queue_cfg = event_queue_cfg;
	event_q_conf.schedule_type = rsrc->sched_type;
	evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evq.nb_queues);
	if (!evt_rsrc->evq.event_q_id)
		rte_panic("Memory allocation failure\n");

	for (event_q_id = 0; event_q_id < evt_rsrc->evq.nb_queues;
								event_q_id++) {
		ret = rte_event_queue_setup(event_d_id, event_q_id,
					    &event_q_conf);
		if (ret < 0)
			rte_panic("Error in configuring event queue\n");
		evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
	}
}
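
/*
 * Create, populate and start one Rx and one Tx event adapter per enabled
 * ethdev. With internal port capability the adapters move packets between
 * the ethdev and the event device without a service core, so no extra
 * polling threads are set up here.
 */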
static void
l2fwd_rx_tx_adapter_setup_internal_port(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	uint16_t adapter_id = 0;
	uint16_t nb_adapter = 0;
	uint16_t port_id;
	uint8_t q_id = 0;
	int ret;

	memset(&eth_q_conf, 0, sizeof(eth_q_conf));
	eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
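
	/* Count the enabled ports; one Rx/Tx adapter is created per port. */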
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		nb_adapter++;
	}

	evt_rsrc->rx_adptr.nb_rx_adptr = nb_adapter;
	evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->rx_adptr.nb_rx_adptr);
	if (!evt_rsrc->rx_adptr.rx_adptr) {
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Rx adapter\n");
	}

	/* Assigned ethernet port. 8< */
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;

		if (rsrc->evt_vec.enabled) {
			uint32_t cap;

			if (rte_event_eth_rx_adapter_caps_get(event_d_id,
							      port_id, &cap))
				rte_panic(
					"Failed to get event rx adapter capability\n");

			if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
				eth_q_conf.vector_sz = rsrc->evt_vec.size;
				eth_q_conf.vector_timeout_ns =
					rsrc->evt_vec.timeout_ns;
				eth_q_conf.vector_mp = rsrc->evt_vec_pool;
				eth_q_conf.rx_queue_flags |=
				RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
			} else {
				rte_panic(
					"Rx adapter doesn't support event vector\n");
			}
		}
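
		/* One Rx adapter per port, created with the default event
		 * port configuration saved during port setup.
		 */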
		ret = rte_event_eth_rx_adapter_create(adapter_id, event_d_id,
						&evt_rsrc->def_p_conf);
		if (ret)
			rte_panic("Failed to create rx adapter[%d]\n",
				  adapter_id);

		/* Configure user requested sched type */
		eth_q_conf.ev.sched_type = rsrc->sched_type;
		eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[q_id];
		/* rx_queue_id of -1 adds all Rx queues of the port */
		ret = rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
							 -1, &eth_q_conf);
		if (ret)
			rte_panic("Failed to add queues to Rx adapter\n");

		ret = rte_event_eth_rx_adapter_start(adapter_id);
		if (ret)
			rte_panic("Rx adapter[%d] start failed\n", adapter_id);

		evt_rsrc->rx_adptr.rx_adptr[adapter_id] = adapter_id;
		adapter_id++;
		if (q_id < evt_rsrc->evq.nb_queues)
			q_id++;
	}

	evt_rsrc->tx_adptr.nb_tx_adptr = nb_adapter;
	evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->tx_adptr.nb_tx_adptr);
	if (!evt_rsrc->tx_adptr.tx_adptr) {
		free(evt_rsrc->rx_adptr.rx_adptr);
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Tx adapter\n");
	}

	adapter_id = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		ret = rte_event_eth_tx_adapter_create(adapter_id, event_d_id,
						&evt_rsrc->def_p_conf);
		if (ret)
			rte_panic("Failed to create tx adapter[%d]\n",
				  adapter_id);

		/* queue of -1 adds all Tx queues of the port */
		ret = rte_event_eth_tx_adapter_queue_add(adapter_id, port_id,
							 -1);
		if (ret)
			rte_panic("Failed to add queues to Tx adapter\n");

		ret = rte_event_eth_tx_adapter_start(adapter_id);
		if (ret)
			rte_panic("Tx adapter[%d] start failed\n", adapter_id);

		evt_rsrc->tx_adptr.tx_adptr[adapter_id] = adapter_id;
		adapter_id++;
	}
	/* >8 End of assigned ethernet port. */
}
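
/*
 * Register the internal-port variants of the setup callbacks; used when
 * the platform's Rx/Tx adapters report internal port capability.
 */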
void
l2fwd_event_set_internal_port_ops(struct event_setup_ops *ops)
{
	ops->event_device_setup = l2fwd_event_device_setup_internal_port;
	ops->event_queue_setup = l2fwd_event_queue_setup_internal_port;
	ops->event_port_setup = l2fwd_event_port_setup_internal_port;
	ops->adapter_setup = l2fwd_rx_tx_adapter_setup_internal_port;
}