1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
8 #include <rte_ethdev.h>
9 #include <rte_string_fns.h>
/* Global registry of all links created by link_create(); searched by
 * link_find(). File-scope static: private to this translation unit. */
14 static struct link_list link_list;
/* NOTE(review): the enclosing function header is elided in this view —
 * presumably a one-time init routine (e.g. link_init()); confirm that
 * this runs before any link_create()/link_find() call. */
19 TAILQ_INIT(&link_list);
/* Look up a link by name with a linear scan of the global registry.
 * Return value and the NULL-name guard are elided in this view;
 * presumably returns the matching node or NULL — confirm. */
25 link_find(const char *name)
32 TAILQ_FOREACH(link, &link_list, node)
33 if (strcmp(link->name, name) == 0)
/* Baseline ethdev configuration, copied into a local struct by
 * link_create() and then specialized (e.g. RSS mode is enabled there
 * when RSS parameters are supplied). Uses the pre-18.08 DPDK rxmode
 * bit-field offload flags (header_split, hw_ip_checksum, ...). */
39 static struct rte_eth_conf port_conf_default = {
42 .mq_mode = ETH_MQ_RX_NONE,
44 .header_split = 0, /* Header split */
45 .hw_ip_checksum = 0, /* IP checksum offload */
46 .hw_vlan_filter = 0, /* VLAN filtering */
47 .hw_vlan_strip = 0, /* VLAN strip */
48 .hw_vlan_extend = 0, /* Extended VLAN */
/* NOTE(review): jumbo_frame is disabled yet max_rx_pkt_len below is set
 * to a jumbo size (9000). In this DPDK generation max_rx_pkt_len is
 * only honored when jumbo_frame is set, so the 9000 may be inert —
 * confirm intent (either enable jumbo_frame or drop the 9000). */
49 .jumbo_frame = 0, /* Jumbo frame support */
50 .hw_strip_crc = 1, /* CRC strip by HW */
51 .enable_scatter = 0, /* Scattered packets RX handler */
53 .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
54 .split_hdr_size = 0, /* Header split buffer size */
64 .mq_mode = ETH_MQ_TX_NONE,
/* Number of rte_eth_rss_reta_entry64 groups needed to cover the largest
 * supported redirection table (512 entries / 64 entries per group). */
69 #define RETA_CONF_SIZE (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
/* Program the NIC RSS redirection table: distribute the caller-supplied
 * RX queues round-robin across all reta_size table entries.
 * Caller (link_create) has already validated that rss->n_queues > 0 and
 * reta_size <= ETH_RSS_RETA_SIZE_512, so the array below cannot be
 * overrun and the modulo below cannot divide by zero.
 * Return-value propagation is elided in this view. */
72 rss_setup(uint16_t port_id,
74 struct link_params_rss *rss)
76 struct rte_eth_rss_reta_entry64 reta_conf[RETA_CONF_SIZE];
81 memset(reta_conf, 0, sizeof(reta_conf));
/* Mark every entry of each touched 64-entry group as valid for update. */
83 for (i = 0; i < reta_size; i++)
84 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
/* Fill table: entry i gets queue_id[i mod n_queues] (round-robin). */
86 for (i = 0; i < reta_size; i++) {
87 uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
88 uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
89 uint32_t rss_qs_pos = i % rss->n_queues;
91 reta_conf[reta_id].reta[reta_pos] =
92 (uint16_t) rss->queue_id[rss_qs_pos];
/* Push the table to the device. */
96 status = rte_eth_dev_rss_reta_update(port_id,
/* Create and bring up an ethdev port under the given name:
 * validate parameters, configure the port (optionally with RSS),
 * set up RX/TX queues, start the port, and register the new link
 * node in the global link_list. Many error-return lines are elided
 * in this view; the visible failure paths stop the port before
 * returning. Returns the new node (presumably; confirm) or NULL. */
104 link_create(const char *name, struct link_params *params)
106 struct rte_eth_dev_info port_info;
107 struct rte_eth_conf port_conf;
109 struct link_params_rss *rss;
110 struct mempool *mempool;
/* Check input params */
115 /* Check input params */
116 if ((name == NULL) ||
119 (params->rx.n_queues == 0) ||
120 (params->rx.queue_size == 0) ||
121 (params->tx.n_queues == 0) ||
122 (params->tx.queue_size == 0))
/* Resolve the port: by device name when given, else by numeric id. */
125 port_id = params->port_id;
126 if (params->dev_name) {
127 status = rte_eth_dev_get_port_by_name(params->dev_name,
133 if (!rte_eth_dev_is_valid_port(port_id))
136 rte_eth_dev_info_get(port_id, &port_info);
/* RX mbufs come from a pre-registered mempool, looked up by name. */
138 mempool = mempool_find(params->rx.mempool_name);
/* RSS sanity checks against device capabilities. NOTE(review): the
 * "if (rss)" guard around these checks appears elided — presumably
 * they only run when RSS was requested; confirm. */
142 rss = params->rx.rss;
144 if ((port_info.reta_size == 0) ||
145 (port_info.reta_size > ETH_RSS_RETA_SIZE_512))
148 if ((rss->n_queues == 0) ||
149 (rss->n_queues >= LINK_RXQ_RSS_MAX))
152 for (i = 0; i < rss->n_queues; i++)
153 if (rss->queue_id[i] >= port_info.max_rx_queues)
/* Start from the file-wide default config, then specialize for RSS. */
161 memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
163 port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
164 port_conf.rx_adv_conf.rss_conf.rss_hf =
165 ETH_RSS_IPV4 | ETH_RSS_IPV6;
/* Allocate queues on the port's NUMA socket; SOCKET_ID_ANY means the
 * device has no NUMA affinity (fallback handling elided). */
168 cpu_id = (uint32_t) rte_eth_dev_socket_id(port_id);
169 if (cpu_id == (uint32_t) SOCKET_ID_ANY)
172 status = rte_eth_dev_configure(
181 if (params->promiscuous)
182 rte_eth_promiscuous_enable(port_id);
/* Port RX queues setup — one queue per loop iteration. */
185 for (i = 0; i < params->rx.n_queues; i++) {
186 status = rte_eth_rx_queue_setup(
189 params->rx.queue_size,
/* Port TX queues setup. */
199 for (i = 0; i < params->tx.n_queues; i++) {
200 status = rte_eth_tx_queue_setup(
203 params->tx.queue_size,
/* Port start; from here on, failure paths must stop the port. */
212 status = rte_eth_dev_start(port_id);
217 status = rss_setup(port_id, port_info.reta_size, rss);
220 rte_eth_dev_stop(port_id);
/* Bring the link up; ENOTSUP is tolerated (some PMDs cannot do it). */
226 status = rte_eth_dev_set_link_up(port_id);
227 if ((status < 0) && (status != -ENOTSUP)) {
228 rte_eth_dev_stop(port_id);
/* Node allocation */
232 /* Node allocation */
233 link = calloc(1, sizeof(struct link));
234 /* calloc failure path: stop the port before bailing out. */
235 rte_eth_dev_stop(port_id);
/* Node fill in: bounded copy of the name plus port/queue bookkeeping. */
240 strlcpy(link->name, name, sizeof(link->name));
241 link->port_id = port_id;
242 link->n_rxq = params->rx.n_queues;
243 link->n_txq = params->tx.n_queues;
/* Node add to list */
245 /* Node add to list */
246 TAILQ_INSERT_TAIL(&link_list, link, node);
/* Query the physical link status of the named link.
 * Returns 1 when the link is up, 0 when it is down; the behavior for an
 * unknown name is elided in this view (presumably returns 0 — confirm). */
252 link_is_up(const char *name)
254 struct rte_eth_link link_params;
257 /* Check input params */
261 link = link_find(name);
/* rte_eth_link_get() may block while the PMD polls for link state. */
266 rte_eth_link_get(link->port_id, &link_params);
268 return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;