/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "l2fwd_common.h"

int
l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
{
	uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_NONE,
		},
		.txmode = {
			.mq_mode = RTE_ETH_MQ_TX_NONE,
		},
	};
	uint16_t nb_ports_available = 0;
	uint16_t port_id;
	int ret;
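
	/*
	 * Event mode enables RSS so the NIC computes a per-packet hash;
	 * the event Rx adapter can then use it as the event flow id,
	 * letting the scheduler spread flows across workers.
	 */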
	if (rsrc->event_mode) {
		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
		port_conf.rx_adv_conf.rss_conf.rss_key = NULL;
		port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP;
	}

	/* Initialise each port */
	RTE_ETH_FOREACH_DEV(port_id) {
		struct rte_eth_conf local_port_conf = port_conf;
		struct rte_eth_dev_info dev_info;
		struct rte_eth_rxconf rxq_conf;
		struct rte_eth_txconf txq_conf;

		/* skip ports that are not enabled */
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0) {
			printf("Skipping disabled port %u\n", port_id);
			continue;
		}
		nb_ports_available++;

		/* init port */
		printf("Initializing port %u... ", port_id);
		fflush(stdout);

		ret = rte_eth_dev_info_get(port_id, &dev_info);
		if (ret != 0)
			rte_panic("Error during getting device (port %u) info: %s\n",
				  port_id, strerror(-ret));
		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			printf("Port %u modified RSS hash function based on hardware support,"
			       "requested:%#"PRIx64" configured:%#"PRIx64"\n",
			       port_id,
			       port_conf.rx_adv_conf.rss_conf.rss_hf,
			       local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

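		/*
		 * Fast mbuf free lets the PMD return transmitted mbufs to
		 * their pool without per-mbuf checks; safe here since all
		 * mbufs come from one pool and are not referenced twice.
		 */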
		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
		/* Configure RX and TX queue. 8< */
		ret = rte_eth_dev_configure(port_id, 1, 1, &local_port_conf);
		if (ret < 0)
			rte_panic("Cannot configure device: err=%d, port=%u\n",
				  ret, port_id);
		/* >8 End of configuration RX and TX queue. */

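		/* Clamp the ring sizes to the device's descriptor limits. */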
		ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd,
						       &nb_txd);
		if (ret < 0)
			rte_panic("Cannot adjust number of descriptors: err=%d, port=%u\n",
				  ret, port_id);

		rte_eth_macaddr_get(port_id, &rsrc->eth_addr[port_id]);

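		/*
		 * One Rx queue per port: drained directly by polling lcores
		 * in poll mode, or by the event Rx adapter in event mode.
		 */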
		/* init one RX queue */
		fflush(stdout);
		rxq_conf = dev_info.default_rxconf;
		rxq_conf.offloads = local_port_conf.rxmode.offloads;
		/* Using lcore to poll one or several ports. 8< */
		ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
					     rte_eth_dev_socket_id(port_id),
					     &rxq_conf,
					     rsrc->pktmbuf_pool);
		if (ret < 0)
			rte_panic("rte_eth_rx_queue_setup:err=%d, port=%u\n",
				  ret, port_id);

		/* >8 End of using lcore to poll one or several ports. */

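		/*
		 * Tx queues take no mempool: transmitted mbufs are freed
		 * back to the pool they were allocated from.
		 */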
		/* Init one TX queue on each port. 8< */
		fflush(stdout);
		txq_conf = dev_info.default_txconf;
		txq_conf.offloads = local_port_conf.txmode.offloads;
		ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
					     rte_eth_dev_socket_id(port_id),
					     &txq_conf);
		if (ret < 0)
			rte_panic("rte_eth_tx_queue_setup:err=%d, port=%u\n",
				  ret, port_id);
		/* >8 End of init one TX queue on each port. */

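		/*
		 * L2 forwarding must accept frames whatever their
		 * destination MAC, so run the port in promiscuous mode.
		 */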
		rte_eth_promiscuous_enable(port_id);

		printf("Port %u, MAC address: " RTE_ETHER_ADDR_PRT_FMT "\n\n",
		       port_id,
		       RTE_ETHER_ADDR_BYTES(&rsrc->eth_addr[port_id]));
	}

	return nb_ports_available;
}
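
/*
 * Free the mbufs carried by each event vector, then return the vector
 * container itself to the mempool it was allocated from.
 */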
static void
l2fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
{
	uint16_t i;

	for (i = 0; i < num; i++) {
		rte_pktmbuf_free_bulk(events[i].vec->mbufs,
				      events[i].vec->nb_elem);
		rte_mempool_put(rte_mempool_from_obj(events[i].vec),
				events[i].vec);
	}
}
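
/*
 * rte_event_port_quiesce() callback: drop any event still buffered in
 * the port when a worker exits, freeing vectors and mbufs alike.
 */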
static void
l2fwd_event_port_flush(uint8_t event_d_id __rte_unused, struct rte_event ev,
		       void *args __rte_unused)
{
	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
		l2fwd_event_vector_array_free(&ev, 1);
	else
		rte_pktmbuf_free(ev.mbuf);
}
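
/*
 * Worker exit path: free events dequeued but not yet forwarded
 * (indices nb_enq..nb_deq-1), release every dequeued scheduling
 * context back to the event device, then quiesce the port.
 */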
void
l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
			   struct rte_event events[], uint16_t nb_enq,
			   uint16_t nb_deq, uint8_t is_vector)
{
	uint16_t i;

	if (nb_deq) {
		if (is_vector)
			l2fwd_event_vector_array_free(events + nb_enq,
						      nb_deq - nb_enq);
		else
			for (i = nb_enq; i < nb_deq; i++)
				rte_pktmbuf_free(events[i].mbuf);

		for (i = 0; i < nb_deq; i++)
			events[i].op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(event_d_id, port_id, events, nb_deq);
	}

	rte_event_port_quiesce(event_d_id, port_id, l2fwd_event_port_flush,
			       NULL);
}