/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdlib.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>

#include "l2fwd_common.h"
#include "l2fwd_event.h"

static uint32_t
l2fwd_event_device_setup_internal_port(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	struct rte_event_dev_config event_d_conf = {
		.nb_events_limit = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128
	};
	struct rte_event_dev_info dev_info;
	uint8_t disable_implicit_release;
	const uint8_t event_d_id = 0; /* Always use first event device only */
	uint32_t event_queue_cfg = 0;
	uint16_t ethdev_count = 0;
	uint16_t num_workers = 0;
	uint16_t port_id;
	int ret;
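
	/* Count the ethernet devices enabled in the application port mask. */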
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		ethdev_count++;
	}

	/* Event device configuration */
	rte_event_dev_info_get(event_d_id, &dev_info);

	disable_implicit_release = !!(dev_info.event_dev_cap &
				RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
	evt_rsrc->disable_implicit_release = disable_implicit_release;

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
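
	/* Clamp the requested configuration to the event device capabilities. */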
	event_d_conf.nb_event_queues = ethdev_count;
	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
		event_d_conf.nb_event_queues = dev_info.max_event_queues;

	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
		event_d_conf.nb_events_limit = dev_info.max_num_events;

	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
		event_d_conf.nb_event_queue_flows =
					dev_info.max_event_queue_flows;

	if (dev_info.max_event_port_dequeue_depth <
				event_d_conf.nb_event_port_dequeue_depth)
		event_d_conf.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;

	if (dev_info.max_event_port_enqueue_depth <
				event_d_conf.nb_event_port_enqueue_depth)
		event_d_conf.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;
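
	/* Use one event port per worker lcore, capped by the device limit. */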
	num_workers = rte_lcore_count();
	if (dev_info.max_event_ports < num_workers)
		num_workers = dev_info.max_event_ports;

	event_d_conf.nb_event_ports = num_workers;
	evt_rsrc->evp.nb_ports = num_workers;
	evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
	evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
				RTE_EVENT_DEV_CAP_BURST_MODE);

	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
	if (ret < 0)
		rte_panic("Error in configuring event device\n");

	evt_rsrc->event_d_id = event_d_id;
	return event_queue_cfg;
}

static void
l2fwd_event_port_setup_internal_port(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_port_conf event_p_conf = {
		.dequeue_depth = 32,
		.enqueue_depth = 32,
		.new_event_threshold = 4096
	};
	struct rte_event_port_conf def_p_conf;
	uint8_t event_p_id;
	int32_t ret;
	evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evp.nb_ports);
	if (!evt_rsrc->evp.event_p_id)
		rte_panic("Failed to allocate memory for Event Ports\n");

	rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
	if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
		event_p_conf.new_event_threshold =
					def_p_conf.new_event_threshold;

	if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
		event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;

	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;

	event_p_conf.disable_implicit_release =
					evt_rsrc->disable_implicit_release;
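
	/* Set up each event port and link it to all event queues. */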
	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
								event_p_id++) {
		ret = rte_event_port_setup(event_d_id, event_p_id,
					   &event_p_conf);
		if (ret < 0)
			rte_panic("Error in configuring event port %d\n",
				  event_p_id);

		/* NULL queue list links the port to all event queues */
		ret = rte_event_port_link(event_d_id, event_p_id, NULL,
					  NULL, 0);
		if (ret < 0)
			rte_panic("Error in linking event port %d to queue\n",
				  event_p_id);
		evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;

		/* init spinlock */
		rte_spinlock_init(&evt_rsrc->evp.lock);
	}

	evt_rsrc->def_p_conf = event_p_conf;
}

static void
l2fwd_event_queue_setup_internal_port(struct l2fwd_resources *rsrc,
				       uint32_t event_queue_cfg)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_queue_conf event_q_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
		.event_queue_cfg = event_queue_cfg,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
	struct rte_event_queue_conf def_q_conf;
	uint8_t event_q_id = 0;
	int32_t ret;
	rte_event_queue_default_conf_get(event_d_id, event_q_id, &def_q_conf);

	if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
		event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;

	if (def_q_conf.nb_atomic_order_sequences <
					event_q_conf.nb_atomic_order_sequences)
		event_q_conf.nb_atomic_order_sequences =
					def_q_conf.nb_atomic_order_sequences;

	event_q_conf.event_queue_cfg = event_queue_cfg;
	event_q_conf.schedule_type = rsrc->sched_type;
	evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evq.nb_queues);
	if (!evt_rsrc->evq.event_q_id)
		rte_panic("Memory allocation failure\n");
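
	/* Set up each event queue with the selected schedule type. */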
	for (event_q_id = 0; event_q_id < evt_rsrc->evq.nb_queues;
								event_q_id++) {
		ret = rte_event_queue_setup(event_d_id, event_q_id,
					    &event_q_conf);
		if (ret < 0)
			rte_panic("Error in configuring event queue\n");
		evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
	}
}
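
/*
 * Populate the setup callbacks used when the event device provides
 * internal port capability.
 */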
void
l2fwd_event_set_internal_port_ops(struct event_setup_ops *ops)
{
	ops->event_device_setup = l2fwd_event_device_setup_internal_port;
	ops->event_queue_setup = l2fwd_event_queue_setup_internal_port;
	ops->event_port_setup = l2fwd_event_port_setup_internal_port;
}