/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include <stdlib.h>
#include <string.h>

#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "link.h"
#include "mempool.h"

/* List of all links (Ethernet device ports) created so far. */
static struct link_list link_list;

int
link_init(void)
{
	TAILQ_INIT(&link_list);

	return 0;
}

struct link *
link_find(const char *name)
{
	struct link *link;

	if (name == NULL)
		return NULL;

	TAILQ_FOREACH(link, &link_list, node)
		if (strcmp(link->name, name) == 0)
			return link;

	return NULL;
}

struct link *
link_next(struct link *link)
{
	return (link == NULL) ? TAILQ_FIRST(&link_list) : TAILQ_NEXT(link, node);
}

static struct rte_eth_conf port_conf_default = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_NONE,
		.max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
		.split_hdr_size = 0, /* Header split buffer size */
		.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
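
/*
 * The defaults above leave RSS disabled. When the caller of link_create()
 * below supplies RSS parameters, rxmode.mq_mode is switched to ETH_MQ_RX_RSS
 * and rss_hf is filled in from the hash types the port reports as supported.
 */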

#define RETA_CONF_SIZE     (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)

static int
rss_setup(uint16_t port_id,
	uint16_t reta_size,
	struct link_params_rss *rss)
{
	struct rte_eth_rss_reta_entry64 reta_conf[RETA_CONF_SIZE];
	uint32_t i;
	int status;

	/* RETA setting */
	memset(reta_conf, 0, sizeof(reta_conf));

	for (i = 0; i < reta_size; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < reta_size; i++) {
		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
		uint32_t rss_qs_pos = i % rss->n_queues;

		reta_conf[reta_id].reta[reta_pos] =
			(uint16_t) rss->queue_id[rss_qs_pos];
	}

	/* RETA update */
	status = rte_eth_dev_rss_reta_update(port_id,
		reta_conf,
		reta_size);

	return status;
}
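
/*
 * Note on the RETA programming above: every redirection table entry is
 * enabled (mask = UINT64_MAX) and the RSS queues are assigned round-robin.
 * For example, with reta_size = 128 and n_queues = 3, the table holds
 * queue_id[0], queue_id[1], queue_id[2], queue_id[0], ... so incoming flows
 * are spread evenly across the three queues.
 */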

struct link *
link_create(const char *name, struct link_params *params)
{
	struct rte_eth_dev_info port_info;
	struct rte_eth_conf port_conf;
	struct link *link;
	struct link_params_rss *rss;
	struct mempool *mempool;
	uint32_t cpu_id, i;
	int status;
	uint16_t port_id;

	/* Check input params */
	if ((name == NULL) ||
		link_find(name) ||
		(params == NULL) ||
		(params->rx.n_queues == 0) ||
		(params->rx.queue_size == 0) ||
		(params->tx.n_queues == 0) ||
		(params->tx.queue_size == 0))
		return NULL;

	port_id = params->port_id;
	if (params->dev_name) {
		status = rte_eth_dev_get_port_by_name(params->dev_name,
			&port_id);
		if (status)
			return NULL;
	} else
		if (!rte_eth_dev_is_valid_port(port_id))
			return NULL;

	rte_eth_dev_info_get(port_id, &port_info);

	mempool = mempool_find(params->rx.mempool_name);
	if (mempool == NULL)
		return NULL;

	rss = params->rx.rss;
	if (rss) {
		if ((port_info.reta_size == 0) ||
			(port_info.reta_size > ETH_RSS_RETA_SIZE_512))
			return NULL;

		if ((rss->n_queues == 0) ||
			(rss->n_queues >= LINK_RXQ_RSS_MAX))
			return NULL;

		for (i = 0; i < rss->n_queues; i++)
			if (rss->queue_id[i] >= port_info.max_rx_queues)
				return NULL;
	}

	/* Port */
	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
	if (rss) {
		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
		if (port_info.flow_type_rss_offloads & ETH_RSS_IPV4)
			port_conf.rx_adv_conf.rss_conf.rss_hf |=
				ETH_RSS_IPV4;
		if (port_info.flow_type_rss_offloads & ETH_RSS_IPV6)
			port_conf.rx_adv_conf.rss_conf.rss_hf |=
				ETH_RSS_IPV6;
	}

	cpu_id = (uint32_t) rte_eth_dev_socket_id(port_id);
	if (cpu_id == (uint32_t) SOCKET_ID_ANY)
		cpu_id = 0;

	status = rte_eth_dev_configure(
		port_id,
		params->rx.n_queues,
		params->tx.n_queues,
		&port_conf);
	if (status < 0)
		return NULL;

	if (params->promiscuous)
		rte_eth_promiscuous_enable(port_id);

	/* Port RX */
	for (i = 0; i < params->rx.n_queues; i++) {
		status = rte_eth_rx_queue_setup(
			port_id,
			i,
			params->rx.queue_size,
			cpu_id,
			NULL,
			mempool->m);
		if (status < 0)
			return NULL;
	}

	/* Port TX */
	for (i = 0; i < params->tx.n_queues; i++) {
		status = rte_eth_tx_queue_setup(
			port_id,
			i,
			params->tx.queue_size,
			cpu_id,
			NULL);
		if (status < 0)
			return NULL;
	}

	/* Port start */
	status = rte_eth_dev_start(port_id);
	if (status < 0)
		return NULL;

	if (rss) {
		status = rss_setup(port_id, port_info.reta_size, rss);
		if (status) {
			rte_eth_dev_stop(port_id);
			return NULL;
		}
	}

	/* Port link up */
	status = rte_eth_dev_set_link_up(port_id);
	if ((status < 0) && (status != -ENOTSUP)) {
		rte_eth_dev_stop(port_id);
		return NULL;
	}

	/* Node allocation */
	link = calloc(1, sizeof(struct link));
	if (link == NULL) {
		rte_eth_dev_stop(port_id);
		return NULL;
	}

	/* Node fill in */
	strlcpy(link->name, name, sizeof(link->name));
	link->port_id = port_id;
	link->n_rxq = params->rx.n_queues;
	link->n_txq = params->tx.n_queues;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&link_list, link, node);

	return link;
}
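
/*
 * Example usage (illustrative sketch, not part of the original file). The
 * values below are hypothetical; only struct link_params fields that
 * link_create() above actually reads are assumed to exist:
 *
 *	static struct link_params_rss rss = {
 *		.queue_id = {0, 1},
 *		.n_queues = 2,
 *	};
 *
 *	struct link_params params = {
 *		.dev_name = NULL,  (resolve the port by .port_id instead)
 *		.port_id = 0,
 *		.rx = {
 *			.n_queues = 2,
 *			.queue_size = 1024,
 *			.mempool_name = "MEMPOOL0",
 *			.rss = &rss,  (set to NULL to keep RSS disabled)
 *		},
 *		.tx = {
 *			.n_queues = 2,
 *			.queue_size = 1024,
 *		},
 *		.promiscuous = 1,
 *	};
 *
 *	struct link *l = link_create("LINK0", &params);
 */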

int
link_is_up(const char *name)
{
	struct rte_eth_link link_params;
	struct link *link;

	/* Check input params */
	if (name == NULL)
		return 0;

	link = link_find(name);
	if (link == NULL)
		return 0;

	/* Resource */
	rte_eth_link_get(link->port_id, &link_params);

	return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
}