/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */
8 #include <rte_malloc.h>
11 #include "l3fwd_event.h"
14 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
16 char buf[RTE_ETHER_ADDR_FMT_SIZE];
17 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
18 printf("%s%s", name, buf);
21 struct l3fwd_event_resources *
22 l3fwd_get_eventdev_rsrc(void)
24 static struct l3fwd_event_resources *rsrc;
29 rsrc = rte_zmalloc("l3fwd", sizeof(struct l3fwd_event_resources), 0);
31 rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
32 rsrc->eth_rx_queues = 1;
36 rte_exit(EXIT_FAILURE, "Unable to allocate memory for eventdev cfg\n");
42 l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf)
44 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
45 uint16_t nb_ports = rte_eth_dev_count_avail();
46 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
47 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
48 unsigned int nb_lcores = rte_lcore_count();
49 struct rte_eth_conf local_port_conf;
50 struct rte_eth_dev_info dev_info;
51 struct rte_eth_txconf txconf;
52 struct rte_eth_rxconf rxconf;
58 /* initialize all ports */
59 RTE_ETH_FOREACH_DEV(port_id) {
60 local_port_conf = *port_conf;
61 /* skip ports that are not enabled */
62 if ((evt_rsrc->port_mask & (1 << port_id)) == 0) {
63 printf("\nSkipping disabled port %d\n", port_id);
68 printf("Initializing port %d ... ", port_id);
70 printf("Creating queues: nb_rxq=%d nb_txq=1...\n",
71 evt_rsrc->eth_rx_queues);
73 ret = rte_eth_dev_info_get(port_id, &dev_info);
75 rte_panic("Error during getting device (port %u) info:"
76 "%s\n", port_id, strerror(-ret));
78 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
79 local_port_conf.txmode.offloads |=
80 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
82 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
83 dev_info.flow_type_rss_offloads;
84 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
85 port_conf->rx_adv_conf.rss_conf.rss_hf) {
86 printf("Port %u modified RSS hash function "
87 "based on hardware support,"
88 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
90 port_conf->rx_adv_conf.rss_conf.rss_hf,
91 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
94 ret = rte_eth_dev_configure(port_id, evt_rsrc->eth_rx_queues,
97 rte_exit(EXIT_FAILURE,
98 "Cannot configure device: err=%d, port=%d\n",
101 ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd,
104 rte_exit(EXIT_FAILURE,
105 "Cannot adjust number of descriptors: err=%d, "
106 "port=%d\n", ret, port_id);
108 rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
109 print_ethaddr(" Address:", &ports_eth_addr[port_id]);
111 print_ethaddr("Destination:",
112 (const struct rte_ether_addr *)&dest_eth_addr[port_id]);
115 /* prepare source MAC for each port. */
116 rte_ether_addr_copy(&ports_eth_addr[port_id],
117 (struct rte_ether_addr *)(val_eth + port_id) + 1);
120 if (!evt_rsrc->per_port_pool) {
121 /* port_id = 0; this is *not* signifying the first port,
122 * rather, it signifies that port_id is ignored.
124 nb_mbuf = RTE_MAX(nb_ports * nb_rxd +
126 nb_ports * nb_lcores *
128 nb_lcores * MEMPOOL_CACHE_SIZE,
130 ret = init_mem(0, nb_mbuf);
132 nb_mbuf = RTE_MAX(nb_rxd + nb_rxd +
133 nb_lcores * MAX_PKT_BURST +
134 nb_lcores * MEMPOOL_CACHE_SIZE,
136 ret = init_mem(port_id, nb_mbuf);
138 /* init Rx queues per port */
139 rxconf = dev_info.default_rxconf;
140 rxconf.offloads = local_port_conf.rxmode.offloads;
142 for (eth_qid = 0; eth_qid < evt_rsrc->eth_rx_queues;
144 if (!evt_rsrc->per_port_pool)
145 ret = rte_eth_rx_queue_setup(port_id, eth_qid,
147 evt_rsrc->pkt_pool[0][0]);
149 ret = rte_eth_rx_queue_setup(port_id, eth_qid,
151 evt_rsrc->pkt_pool[port_id][0]);
153 rte_exit(EXIT_FAILURE,
154 "rte_eth_rx_queue_setup: err=%d, "
155 "port=%d, eth_qid: %d\n",
156 ret, port_id, eth_qid);
159 /* init one Tx queue per port */
160 txconf = dev_info.default_txconf;
161 txconf.offloads = local_port_conf.txmode.offloads;
162 ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd, 0, &txconf);
164 rte_exit(EXIT_FAILURE,
165 "rte_eth_tx_queue_setup: err=%d, "
166 "port=%d\n", ret, port_id);
171 l3fwd_event_capability_setup(void)
173 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
178 RTE_ETH_FOREACH_DEV(i) {
179 ret = rte_event_eth_tx_adapter_caps_get(0, i, &caps);
181 rte_exit(EXIT_FAILURE,
182 "Invalid capability for Tx adptr port %d\n",
185 evt_rsrc->tx_mode_q |= !(caps &
186 RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
189 if (evt_rsrc->tx_mode_q)
190 l3fwd_event_set_generic_ops(&evt_rsrc->ops);
192 l3fwd_event_set_internal_port_ops(&evt_rsrc->ops);
196 l3fwd_get_free_event_port(struct l3fwd_event_resources *evt_rsrc)
201 rte_spinlock_lock(&evt_rsrc->evp.lock);
202 if (index >= evt_rsrc->evp.nb_ports) {
203 printf("No free event port is available\n");
207 port_id = evt_rsrc->evp.event_p_id[index];
209 rte_spinlock_unlock(&evt_rsrc->evp.lock);
215 l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
217 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
218 const event_loop_cb lpm_event_loop[2][2] = {
219 [0][0] = lpm_event_main_loop_tx_d,
220 [0][1] = lpm_event_main_loop_tx_d_burst,
221 [1][0] = lpm_event_main_loop_tx_q,
222 [1][1] = lpm_event_main_loop_tx_q_burst,
224 const event_loop_cb em_event_loop[2][2] = {
225 [0][0] = em_event_main_loop_tx_d,
226 [0][1] = em_event_main_loop_tx_d_burst,
227 [1][0] = em_event_main_loop_tx_q,
228 [1][1] = em_event_main_loop_tx_q_burst,
230 const event_loop_cb fib_event_loop[2][2] = {
231 [0][0] = fib_event_main_loop_tx_d,
232 [0][1] = fib_event_main_loop_tx_d_burst,
233 [1][0] = fib_event_main_loop_tx_q,
234 [1][1] = fib_event_main_loop_tx_q_burst,
236 uint32_t event_queue_cfg;
239 if (!evt_rsrc->enabled)
242 if (!rte_event_dev_count())
243 rte_exit(EXIT_FAILURE, "No Eventdev found");
245 /* Setup eventdev capability callbacks */
246 l3fwd_event_capability_setup();
248 /* Ethernet device configuration */
249 l3fwd_eth_dev_port_setup(port_conf);
251 /* Event device configuration */
252 event_queue_cfg = evt_rsrc->ops.event_device_setup();
254 /* Event queue configuration */
255 evt_rsrc->ops.event_queue_setup(event_queue_cfg);
257 /* Event port configuration */
258 evt_rsrc->ops.event_port_setup();
260 /* Rx/Tx adapters configuration */
261 evt_rsrc->ops.adapter_setup();
263 /* Start event device */
264 ret = rte_event_dev_start(evt_rsrc->event_d_id);
266 rte_exit(EXIT_FAILURE, "Error in starting eventdev");
268 evt_rsrc->ops.lpm_event_loop = lpm_event_loop[evt_rsrc->tx_mode_q]
269 [evt_rsrc->has_burst];
271 evt_rsrc->ops.em_event_loop = em_event_loop[evt_rsrc->tx_mode_q]
272 [evt_rsrc->has_burst];
274 evt_rsrc->ops.fib_event_loop = fib_event_loop[evt_rsrc->tx_mode_q]
275 [evt_rsrc->has_burst];