1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
8 #include <rte_malloc.h>
11 #include "l3fwd_event.h"
/* Print "<name><formatted MAC address>" to stdout; no trailing newline. */
14 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
16 char buf[RTE_ETHER_ADDR_FMT_SIZE];
/* Render the MAC as the canonical colon-separated string into buf. */
17 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
18 printf("%s%s", name, buf);
/*
 * Lazily-initialized singleton accessor for the eventdev resource block.
 * First call zero-allocates the structure and seeds defaults (atomic
 * scheduling, one ethdev Rx queue); later calls return the cached pointer.
 * Terminates the application if the allocation fails.
 */
21 struct l3fwd_event_resources *
22 l3fwd_get_eventdev_rsrc(void)
/* static: pointer persists across calls, making this a process-wide singleton. */
24 static struct l3fwd_event_resources *rsrc;
/* rte_zmalloc returns zero-filled memory, so all unset fields default to 0.
 * NOTE(review): presumably guarded by a NULL check on rsrc and followed by a
 * return of the cached pointer — confirm against the full source.
 */
29 rsrc = rte_zmalloc("l3fwd", sizeof(struct l3fwd_event_resources), 0);
31 rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
32 rsrc->eth_rx_queues = 1;
/* Allocation failure is fatal: no fallback path exists for eventdev config. */
36 rte_exit(EXIT_FAILURE, "Unable to allocate memory for eventdev cfg\n");
/*
 * Configure every enabled ethernet port for the eventdev pipeline:
 * apply Tx/RSS offload adjustments against device capabilities, configure
 * the device with evt_rsrc->eth_rx_queues Rx queues and one Tx queue,
 * create (shared or per-port) mbuf pools, and set up all Rx queues plus
 * the single Tx queue.  Any configuration failure terminates the process.
 *
 * @param port_conf  Template port configuration; copied per port and
 *                   adjusted to what each device actually supports.
 */
42 l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf)
44 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
45 uint16_t nb_ports = rte_eth_dev_count_avail();
46 unsigned int nb_lcores = rte_lcore_count();
47 struct rte_eth_conf local_port_conf;
48 struct rte_eth_dev_info dev_info;
49 struct rte_eth_txconf txconf;
50 struct rte_eth_rxconf rxconf;
56 /* initialize all ports */
57 RTE_ETH_FOREACH_DEV(port_id) {
/* Work on a per-port copy so capability adjustments below never mutate
 * the caller's template. */
58 local_port_conf = *port_conf;
59 /* skip ports that are not enabled */
60 if ((evt_rsrc->port_mask & (1 << port_id)) == 0) {
61 printf("\nSkipping disabled port %d\n", port_id);
66 printf("Initializing port %d ... ", port_id);
68 printf("Creating queues: nb_rxq=%d nb_txq=1...\n",
69 evt_rsrc->eth_rx_queues);
71 ret = rte_eth_dev_info_get(port_id, &dev_info);
73 rte_panic("Error during getting device (port %u) info:"
74 "%s\n", port_id, strerror(-ret));
/* Enable fast mbuf free only when the PMD advertises support for it. */
76 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
77 local_port_conf.txmode.offloads |=
78 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Trim the requested RSS hash functions down to what the NIC supports
 * and warn when the effective mask differs from the request. */
80 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
81 dev_info.flow_type_rss_offloads;
82 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
83 port_conf->rx_adv_conf.rss_conf.rss_hf) {
84 printf("Port %u modified RSS hash function "
85 "based on hardware support,"
86 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
88 port_conf->rx_adv_conf.rss_conf.rss_hf,
89 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
/* One Tx queue per port; Rx queue count comes from the event resources. */
92 ret = rte_eth_dev_configure(port_id, evt_rsrc->eth_rx_queues,
95 rte_exit(EXIT_FAILURE,
96 "Cannot configure device: err=%d, port=%d\n",
/* Let the PMD clamp the requested descriptor counts to its limits. */
99 ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd,
102 rte_exit(EXIT_FAILURE,
103 "Cannot adjust number of descriptors: err=%d, "
104 "port=%d\n", ret, port_id);
106 rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
107 print_ethaddr(" Address:", &ports_eth_addr[port_id]);
109 print_ethaddr("Destination:",
110 (const struct rte_ether_addr *)&dest_eth_addr[port_id]);
113 /* prepare source MAC for each port. */
/* val_eth packs dst+src MACs per port; the +1 offset selects the source
 * MAC slot within that entry. */
114 rte_ether_addr_copy(&ports_eth_addr[port_id],
115 (struct rte_ether_addr *)(val_eth + port_id) + 1);
/* Mempool sizing: a single shared pool (port_id argument 0 means
 * "ignored") must cover all ports; a per-port pool only covers one. */
118 if (!evt_rsrc->per_port_pool) {
119 /* port_id = 0; this is *not* signifying the first port,
120 * rather, it signifies that port_id is ignored.
122 nb_mbuf = RTE_MAX(nb_ports * nb_rxd +
124 nb_ports * nb_lcores *
126 nb_lcores * MEMPOOL_CACHE_SIZE,
128 ret = init_mem(0, nb_mbuf);
130 nb_mbuf = RTE_MAX(nb_rxd + nb_rxd +
131 nb_lcores * MAX_PKT_BURST +
132 nb_lcores * MEMPOOL_CACHE_SIZE,
134 ret = init_mem(port_id, nb_mbuf);
136 /* init Rx queues per port */
137 rxconf = dev_info.default_rxconf;
138 rxconf.offloads = local_port_conf.rxmode.offloads;
140 for (eth_qid = 0; eth_qid < evt_rsrc->eth_rx_queues;
/* Shared pool: every queue of every port draws from pkt_pool[0][0];
 * otherwise each port uses its own pkt_pool[port_id][0]. */
142 if (!evt_rsrc->per_port_pool)
143 ret = rte_eth_rx_queue_setup(port_id, eth_qid,
145 evt_rsrc->pkt_pool[0][0]);
147 ret = rte_eth_rx_queue_setup(port_id, eth_qid,
149 evt_rsrc->pkt_pool[port_id][0]);
151 rte_exit(EXIT_FAILURE,
152 "rte_eth_rx_queue_setup: err=%d, "
153 "port=%d, eth_qid: %d\n",
154 ret, port_id, eth_qid);
157 /* init one Tx queue per port */
158 txconf = dev_info.default_txconf;
159 txconf.offloads = local_port_conf.txmode.offloads;
/* socket_id 0: Tx queue memory is taken from socket 0 unconditionally. */
160 ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd, 0, &txconf);
162 rte_exit(EXIT_FAILURE,
163 "rte_eth_tx_queue_setup: err=%d, "
164 "port=%d\n", ret, port_id);
/*
 * Probe the Tx adapter capabilities of event device 0 against every
 * ethernet port and select the service ops accordingly: if ANY port lacks
 * an internal event port (INTERNAL_PORT cap), fall back to generic
 * (queue-based) Tx for all ports; otherwise use internal-port ops.
 */
169 l3fwd_event_capability_setup(void)
171 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
176 RTE_ETH_FOREACH_DEV(i) {
/* Event device id 0 is assumed here for the capability query. */
177 ret = rte_event_eth_tx_adapter_caps_get(0, i, &caps);
179 rte_exit(EXIT_FAILURE,
180 "Invalid capability for Tx adptr port %d\n",
/* Sticky OR: one port without INTERNAL_PORT forces queue mode for all. */
183 evt_rsrc->tx_mode_q |= !(caps &
184 RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
187 if (evt_rsrc->tx_mode_q)
188 l3fwd_event_set_generic_ops(&evt_rsrc->ops);
190 l3fwd_event_set_internal_port_ops(&evt_rsrc->ops);
/*
 * Hand out the next unused event port id from the shared pool, serialized
 * with a spinlock so concurrent worker lcores never receive the same port.
 * NOTE(review): the index increment and the exhausted-pool return path are
 * not visible here — presumably index advances after each grab; confirm.
 */
194 l3fwd_get_free_event_port(struct l3fwd_event_resources *evt_rsrc)
199 rte_spinlock_lock(&evt_rsrc->evp.lock);
200 if (index >= evt_rsrc->evp.nb_ports) {
201 printf("No free event port is available\n");
205 port_id = evt_rsrc->evp.event_p_id[index];
207 rte_spinlock_unlock(&evt_rsrc->evp.lock);
/*
 * Top-level eventdev bring-up: when event mode is enabled, configure the
 * ethernet ports, event device, queues, ports and Rx/Tx adapters via the
 * ops selected by l3fwd_event_capability_setup(), start the event device,
 * and pick the LPM/EM/FIB worker loop variants matching the runtime flags.
 *
 * @param port_conf  Ethernet port configuration template, forwarded to
 *                   l3fwd_eth_dev_port_setup().
 */
213 l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
215 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
/* Loop lookup tables indexed [vector_enabled][tx_mode_q][has_burst];
 * one table per lookup method (LPM / exact-match / FIB). */
216 const event_loop_cb lpm_event_loop[2][2][2] = {
217 [0][0][0] = lpm_event_main_loop_tx_d,
218 [0][0][1] = lpm_event_main_loop_tx_d_burst,
219 [0][1][0] = lpm_event_main_loop_tx_q,
220 [0][1][1] = lpm_event_main_loop_tx_q_burst,
221 [1][0][0] = lpm_event_main_loop_tx_d_vector,
222 [1][0][1] = lpm_event_main_loop_tx_d_burst_vector,
223 [1][1][0] = lpm_event_main_loop_tx_q_vector,
224 [1][1][1] = lpm_event_main_loop_tx_q_burst_vector,
226 const event_loop_cb em_event_loop[2][2][2] = {
227 [0][0][0] = em_event_main_loop_tx_d,
228 [0][0][1] = em_event_main_loop_tx_d_burst,
229 [0][1][0] = em_event_main_loop_tx_q,
230 [0][1][1] = em_event_main_loop_tx_q_burst,
231 [1][0][0] = em_event_main_loop_tx_d_vector,
232 [1][0][1] = em_event_main_loop_tx_d_burst_vector,
233 [1][1][0] = em_event_main_loop_tx_q_vector,
234 [1][1][1] = em_event_main_loop_tx_q_burst_vector,
236 const event_loop_cb fib_event_loop[2][2][2] = {
237 [0][0][0] = fib_event_main_loop_tx_d,
238 [0][0][1] = fib_event_main_loop_tx_d_burst,
239 [0][1][0] = fib_event_main_loop_tx_q,
240 [0][1][1] = fib_event_main_loop_tx_q_burst,
241 [1][0][0] = fib_event_main_loop_tx_d_vector,
242 [1][0][1] = fib_event_main_loop_tx_d_burst_vector,
243 [1][1][0] = fib_event_main_loop_tx_q_vector,
244 [1][1][1] = fib_event_main_loop_tx_q_burst_vector,
246 uint32_t event_queue_cfg;
/* Nothing to do when running in plain poll mode. */
249 if (!evt_rsrc->enabled)
252 if (!rte_event_dev_count())
253 rte_exit(EXIT_FAILURE, "No Eventdev found");
255 /* Setup eventdev capability callbacks */
256 l3fwd_event_capability_setup();
258 /* Ethernet device configuration */
259 l3fwd_eth_dev_port_setup(port_conf);
261 /* Event device configuration */
262 event_queue_cfg = evt_rsrc->ops.event_device_setup();
264 /* Event queue configuration */
265 evt_rsrc->ops.event_queue_setup(event_queue_cfg);
267 /* Event port configuration */
268 evt_rsrc->ops.event_port_setup();
270 /* Rx/Tx adapters configuration */
271 evt_rsrc->ops.adapter_setup();
273 /* Start event device */
274 ret = rte_event_dev_start(evt_rsrc->event_d_id);
276 rte_exit(EXIT_FAILURE, "Error in starting eventdev");
/* Bind the worker-loop callbacks chosen by the runtime flags. */
278 evt_rsrc->ops.lpm_event_loop =
279 lpm_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
280 [evt_rsrc->has_burst];
282 evt_rsrc->ops.em_event_loop =
283 em_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
284 [evt_rsrc->has_burst];
286 evt_rsrc->ops.fib_event_loop =
287 fib_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
288 [evt_rsrc->has_burst];
/*
 * Release `num` vector events: free every mbuf carried by each event's
 * vector, then return the vector object itself to its owning mempool
 * (recovered via rte_mempool_from_obj).
 */
292 l3fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
296 for (i = 0; i < num; i++) {
297 rte_pktmbuf_free_bulk(events[i].vec->mbufs,
298 events[i].vec->nb_elem);
299 rte_mempool_put(rte_mempool_from_obj(events[i].vec),
/*
 * Per-event flush callback handed to rte_event_port_quiesce(): frees the
 * payload of one leftover event — the whole vector for vector events,
 * otherwise the single mbuf.
 */
305 l3fwd_event_port_flush(uint8_t event_d_id __rte_unused, struct rte_event ev,
306 void *args __rte_unused)
308 if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
309 l3fwd_event_vector_array_free(&ev, 1);
311 rte_pktmbuf_free(ev.mbuf);
/*
 * Worker-exit cleanup: dispose of the events dequeued but never enqueued
 * (indices nb_enq..nb_deq-1) — as vectors or single mbufs depending on
 * `is_vector` — release the dequeued events back to the scheduler with
 * RTE_EVENT_OP_RELEASE, and finally quiesce the event port, flushing any
 * events still buffered inside it via l3fwd_event_port_flush().
 * NOTE(review): the branch structure between the free/release paths is not
 * fully visible here — confirm the exact guards against the full source.
 */
315 l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
316 struct rte_event events[], uint16_t nb_enq,
317 uint16_t nb_deq, uint8_t is_vector)
/* Free the tail of the burst that was never handed back to the device. */
323 l3fwd_event_vector_array_free(events + nb_enq,
326 for (i = nb_enq; i < nb_deq; i++)
327 rte_pktmbuf_free(events[i].mbuf);
/* Releasing tells the scheduler these flow contexts are done. */
329 for (i = 0; i < nb_deq; i++)
330 events[i].op = RTE_EVENT_OP_RELEASE;
331 rte_event_enqueue_burst(event_d_id, event_p_id, events, nb_deq);
/* Drain anything still held inside the port itself. */
334 rte_event_port_quiesce(event_d_id, event_p_id, l3fwd_event_port_flush,