/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_rwlock.h>
#include <rte_cycles.h>

#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"

/**
 * Get the interface index from device name.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Nonzero interface index on success, zero otherwise and rte_errno is set.
 */
unsigned int
mlx5_ifindex(const struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int ifindex;

	MLX5_ASSERT(priv->if_index);
	ifindex = priv->if_index;
	if (!ifindex)
		rte_errno = ENXIO;
	return ifindex;
}

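/*
 * Illustrative sketch (editor's addition, not part of the PMD source): the
 * zero return value doubles as the error indicator, so callers are expected
 * to test it and fall back to rte_errno. The guard macro and helper name
 * (MLX5_ETHDEV_USAGE_SKETCH, app_ifindex_or_errno) are hypothetical and the
 * block is compiled out.
 */
#ifdef MLX5_ETHDEV_USAGE_SKETCH
static int
app_ifindex_or_errno(const struct rte_eth_dev *dev)
{
	unsigned int ifindex = mlx5_ifindex(dev);

	if (ifindex == 0)
		return -rte_errno; /* mlx5_ifindex() already set rte_errno */
	return (int)ifindex;
}
#endif /* MLX5_ETHDEV_USAGE_SKETCH */
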
/**
 * DPDK callback for Ethernet device configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_configure(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int rxqs_n = dev->data->nb_rx_queues;
	unsigned int txqs_n = dev->data->nb_tx_queues;
	const uint8_t use_app_rss_key =
		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
	int ret = 0;

	if (use_app_rss_key &&
	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
	     MLX5_RSS_HASH_KEY_LEN)) {
		DRV_LOG(ERR, "port %u RSS key len must be %s Bytes long",
			dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN));
		rte_errno = EINVAL;
		return -rte_errno;
	}
	priv->rss_conf.rss_key =
		rte_realloc(priv->rss_conf.rss_key,
			    MLX5_RSS_HASH_KEY_LEN, 0);
	if (!priv->rss_conf.rss_key) {
		DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
			dev->data->port_id, rxqs_n);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
	memcpy(priv->rss_conf.rss_key,
	       use_app_rss_key ?
	       dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
	       rss_hash_default_key,
	       MLX5_RSS_HASH_KEY_LEN);
	priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN;
	priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	priv->rxqs = (void *)dev->data->rx_queues;
	priv->txqs = (void *)dev->data->tx_queues;
	if (txqs_n != priv->txqs_n) {
		DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
			dev->data->port_id, priv->txqs_n, txqs_n);
		priv->txqs_n = txqs_n;
	}
	if (rxqs_n > priv->config.ind_table_max_size) {
		DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
			dev->data->port_id, rxqs_n);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (rxqs_n != priv->rxqs_n) {
		DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
			dev->data->port_id, priv->rxqs_n, rxqs_n);
		priv->rxqs_n = rxqs_n;
	}
	priv->skip_default_rss_reta = 0;
	ret = mlx5_proc_priv_init(dev);
	if (ret)
		return ret;
	return 0;
}

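/*
 * Illustrative sketch (editor's addition, not part of the PMD source): how an
 * application would drive the configuration path above through the generic
 * ethdev API. The 40-byte key length matches MLX5_RSS_HASH_KEY_LEN; names not
 * defined in this file (MLX5_ETHDEV_USAGE_SKETCH, app_rss_key,
 * app_configure_port) are hypothetical and the block is compiled out.
 */
#ifdef MLX5_ETHDEV_USAGE_SKETCH
static uint8_t app_rss_key[40]; /* filled in by the application */

static int
app_configure_port(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
		.rx_adv_conf.rss_conf = {
			.rss_key = app_rss_key,
			.rss_key_len = sizeof(app_rss_key), /* must be 40 */
			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
		},
	};

	/* 4 Rx and 4 Tx queues; mlx5_dev_configure() runs from this call. */
	return rte_eth_dev_configure(port_id, 4, 4, &conf);
}
#endif /* MLX5_ETHDEV_USAGE_SKETCH */
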
/**
 * Configure default RSS reta.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int rxqs_n = dev->data->nb_rx_queues;
	unsigned int i;
	unsigned int j;
	unsigned int reta_idx_n;
	int ret = 0;
	unsigned int *rss_queue_arr = NULL;
	unsigned int rss_queue_n = 0;

	if (priv->skip_default_rss_reta)
		return ret;
	rss_queue_arr = rte_malloc("", rxqs_n * sizeof(unsigned int), 0);
	if (!rss_queue_arr) {
		DRV_LOG(ERR, "port %u cannot allocate RSS queue list (%u)",
			dev->data->port_id, rxqs_n);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	for (i = 0, j = 0; i < rxqs_n; i++) {
		struct mlx5_rxq_data *rxq_data;
		struct mlx5_rxq_ctrl *rxq_ctrl;

		rxq_data = (*priv->rxqs)[i];
		rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
		if (rxq_ctrl && rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
			rss_queue_arr[j++] = i;
	}
	rss_queue_n = j;
	if (rss_queue_n > priv->config.ind_table_max_size) {
		DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
			dev->data->port_id, rss_queue_n);
		rte_errno = EINVAL;
		rte_free(rss_queue_arr);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
		dev->data->port_id, priv->rxqs_n, rxqs_n);
	priv->rxqs_n = rxqs_n;
	/*
	 * If the requested number of RX queues is not a power of two,
	 * use the maximum indirection table size for better balancing.
	 * The result is always rounded to the next power of two.
	 */
	reta_idx_n = (1 << log2above((rss_queue_n & (rss_queue_n - 1)) ?
				     priv->config.ind_table_max_size :
				     rss_queue_n));
	ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
	if (ret) {
		rte_free(rss_queue_arr);
		return ret;
	}
	/*
	 * When the number of RX queues is not a power of two,
	 * the remaining table entries are padded with reused WQs
	 * and hashes are not spread uniformly.
	 */
	for (i = 0, j = 0; (i != reta_idx_n); ++i) {
		(*priv->reta_idx)[i] = rss_queue_arr[j];
		if (++j == rss_queue_n)
			j = 0;
	}
	rte_free(rss_queue_arr);
	return ret;
}

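/*
 * Worked example (editor's addition): with 6 standard Rx queues and
 * ind_table_max_size = 512, 6 is not a power of two (6 & 5 != 0), so
 * reta_idx_n = 1 << log2above(512) = 512 and the table repeats the pattern
 * 0,1,2,3,4,5,0,1,... meaning some queues own one extra slice of the hash
 * space. With 8 queues, 8 & 7 == 0, so reta_idx_n = 1 << log2above(8) = 8
 * and every queue gets exactly one entry. The numbers are illustrative only.
 */
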
/**
 * Sets default tuning parameters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] info
 *   Info structure output buffer.
 */
static void
mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Minimum CPU utilization. */
	info->default_rxportconf.ring_size = 256;
	info->default_txportconf.ring_size = 256;
	info->default_rxportconf.burst_size = MLX5_RX_DEFAULT_BURST;
	info->default_txportconf.burst_size = MLX5_TX_DEFAULT_BURST;
	if ((priv->link_speed_capa & ETH_LINK_SPEED_200G) |
	    (priv->link_speed_capa & ETH_LINK_SPEED_100G)) {
		info->default_rxportconf.nb_queues = 16;
		info->default_txportconf.nb_queues = 16;
		if (dev->data->nb_rx_queues > 2 ||
		    dev->data->nb_tx_queues > 2) {
			/* Max Throughput. */
			info->default_rxportconf.ring_size = 2048;
			info->default_txportconf.ring_size = 2048;
		}
	} else {
		info->default_rxportconf.nb_queues = 8;
		info->default_txportconf.nb_queues = 8;
		if (dev->data->nb_rx_queues > 2 ||
		    dev->data->nb_tx_queues > 2) {
			/* Max Throughput. */
			info->default_rxportconf.ring_size = 4096;
			info->default_txportconf.ring_size = 4096;
		}
	}
}

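/*
 * Illustrative sketch (editor's addition, not part of the PMD source): the
 * values above are hints exported through rte_eth_dev_info_get(). An
 * application can pick them up implicitly by passing 0 descriptors to the
 * queue setup call, in which case the ethdev layer substitutes
 * default_rxportconf.ring_size. Names not defined in this file
 * (MLX5_ETHDEV_USAGE_SKETCH, app_setup_rxq_with_defaults) are hypothetical
 * and the block is compiled out.
 */
#ifdef MLX5_ETHDEV_USAGE_SKETCH
static int
app_setup_rxq_with_defaults(uint16_t port_id, unsigned int socket_id,
			    struct rte_mempool *mp)
{
	/* nb_rx_desc == 0 selects the driver default ring size set above. */
	return rte_eth_rx_queue_setup(port_id, 0, 0, socket_id, NULL, mp);
}
#endif /* MLX5_ETHDEV_USAGE_SKETCH */
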
/**
 * Sets tx mbuf limiting parameters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] info
 *   Info structure output buffer.
 */
static void
mlx5_set_txlimit_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int inlen;
	uint16_t nb_max;

	inlen = (config->txq_inline_max == MLX5_ARG_UNSET) ?
		MLX5_SEND_DEF_INLINE_LEN :
		(unsigned int)config->txq_inline_max;
	MLX5_ASSERT(config->txq_inline_min >= 0);
	inlen = RTE_MAX(inlen, (unsigned int)config->txq_inline_min);
	inlen = RTE_MIN(inlen, MLX5_WQE_SIZE_MAX +
			MLX5_ESEG_MIN_INLINE_SIZE -
			MLX5_WQE_CSEG_SIZE -
			MLX5_WQE_ESEG_SIZE -
			MLX5_WQE_DSEG_SIZE * 2);
	nb_max = (MLX5_WQE_SIZE_MAX +
		  MLX5_ESEG_MIN_INLINE_SIZE -
		  MLX5_WQE_CSEG_SIZE -
		  MLX5_WQE_ESEG_SIZE -
		  MLX5_WQE_DSEG_SIZE -
		  inlen) / MLX5_WSEG_SIZE;
	info->tx_desc_lim.nb_seg_max = nb_max;
	info->tx_desc_lim.nb_mtu_seg_max = nb_max;
}

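/*
 * Worked example (editor's addition, with assumed numbers since the macro
 * values are defined elsewhere): nb_seg_max caps how many mbuf segments a
 * single Tx packet may carry so that its descriptor still fits in the
 * largest WQE. Assuming MLX5_WQE_SIZE_MAX = 1024, 16-byte control/Ethernet/
 * data segments, a 16-byte WSEG unit and a final inline length of 18 bytes,
 * nb_max = (1024 + 18 - 16 - 16 - 16 - 18) / 16 = 61 segments.
 */
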
/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] info
 *   Info structure output buffer.
 */
int
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int max;

	/* FIXME: we should ask the device for these values. */
	info->min_rx_bufsize = 32;
	info->max_rx_pktlen = 65536;
	info->max_lro_pkt_size = MLX5_MAX_LRO_SIZE;
	/*
	 * Since we need one CQ per QP, the limit is the minimum number
	 * between the two values.
	 */
	max = RTE_MIN(priv->sh->device_attr.max_cq,
		      priv->sh->device_attr.max_qp);
	/* max_rx_queues is uint16_t. */
	max = RTE_MIN(max, (unsigned int)UINT16_MAX);
	info->max_rx_queues = max;
	info->max_tx_queues = max;
	info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
	info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
	info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
				 info->rx_queue_offload_capa);
	info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
	info->if_index = mlx5_ifindex(dev);
	info->reta_size = priv->reta_idx_n ?
		priv->reta_idx_n : config->ind_table_max_size;
	info->hash_key_size = MLX5_RSS_HASH_KEY_LEN;
	info->speed_capa = priv->link_speed_capa;
	info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
	mlx5_set_default_params(dev, info);
	mlx5_set_txlimit_params(dev, info);
	info->switch_info.name = dev->data->name;
	info->switch_info.domain_id = priv->domain_id;
	info->switch_info.port_id = priv->representor_id;
	if (priv->representor) {
		uint16_t port_id;

		if (priv->pf_bond >= 0) {
			/*
			 * Switch port ID is opaque value with driver defined
			 * format. Push the PF index in bonding configurations
			 * in upper four bits of port ID. If we get too many
			 * representors (more than 4K) or PFs (more than 15)
			 * this approach must be reconsidered.
			 */
			if ((info->switch_info.port_id >>
				MLX5_PORT_ID_BONDING_PF_SHIFT) ||
			    priv->pf_bond > MLX5_PORT_ID_BONDING_PF_MASK) {
				DRV_LOG(ERR, "can't update switch port ID"
					     " for bonding device");
				MLX5_ASSERT(false);
				return -ENODEV;
			}
			info->switch_info.port_id |=
				priv->pf_bond << MLX5_PORT_ID_BONDING_PF_SHIFT;
		}
		MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id].data->dev_private;

			if (!opriv ||
			    opriv->representor ||
			    opriv->sh != priv->sh ||
			    opriv->domain_id != priv->domain_id)
				continue;
			/*
			 * Override switch name with that of the master
			 * device.
			 */
			info->switch_info.name = opriv->dev_data->name;
			break;
		}
	}
	return 0;
}

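/*
 * Worked example (editor's addition): taking the comment above at its word
 * that the PF index lives in the upper four bits of the 16-bit switch port
 * ID (i.e. a shift of 12 and a 0xf mask), representor ID 5 on bonding PF 2
 * would be reported as 5 | (2 << 12) = 0x2005, which leaves 4096 values for
 * representors and 15 usable PF indexes, matching the limits quoted above.
 */
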
/**
 * Get firmware version of a device.
 *
 * @param dev
 *   Ethernet device port.
 * @param fw_ver
 *   String output allocated by caller.
 * @param fw_size
 *   Size of the output string, including terminating null byte.
 *
 * @return
 *   0 on success, or the size of the non truncated string if too big.
 */
int
mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_attr *attr = &priv->sh->device_attr;
	size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1;

	if (fw_size < size)
		return size;
	if (fw_ver != NULL)
		strlcpy(fw_ver, attr->fw_ver, fw_size);
	return 0;
}

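/*
 * Illustrative sketch (editor's addition, not part of the PMD source):
 * through the generic rte_eth_dev_fw_version_get() wrapper the return
 * convention above lets callers detect truncation. Names not defined in this
 * file (MLX5_ETHDEV_USAGE_SKETCH, app_fw_version_len) are hypothetical and
 * the block is compiled out.
 */
#ifdef MLX5_ETHDEV_USAGE_SKETCH
static int
app_fw_version_len(uint16_t port_id)
{
	char fw[16];
	int ret = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));

	/* ret == 0: fw holds the string; ret > 0: ret bytes are required. */
	return ret;
}
#endif /* MLX5_ETHDEV_USAGE_SKETCH */
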
/**
 * Get supported packet types.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   A pointer to the supported Packet types array.
 */
const uint32_t *
mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rxq_cq_to_pkt_type() */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == mlx5_rx_burst ||
	    dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
	    dev->rx_pkt_burst == mlx5_rx_burst_vec)
		return ptypes;
	return NULL;
}

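/*
 * Illustrative sketch (editor's addition, not part of the PMD source):
 * applications reach the table above through
 * rte_eth_dev_get_supported_ptypes(), typically calling it once with a NULL
 * buffer to size the result. Names not defined in this file
 * (MLX5_ETHDEV_USAGE_SKETCH, app_count_l4_ptypes) are hypothetical and the
 * block is compiled out.
 */
#ifdef MLX5_ETHDEV_USAGE_SKETCH
static int
app_count_l4_ptypes(uint16_t port_id)
{
	/* A NULL buffer with num == 0 returns the number of matching types. */
	return rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
						NULL, 0);
}
#endif /* MLX5_ETHDEV_USAGE_SKETCH */
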
/**
 * DPDK callback to change the MTU.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t kern_mtu = 0;
	int ret;

	ret = mlx5_get_mtu(dev, &kern_mtu);
	if (ret)
		return ret;
	/* Set kernel interface MTU first. */
	ret = mlx5_set_mtu(dev, mtu);
	if (ret)
		return ret;
	ret = mlx5_get_mtu(dev, &kern_mtu);
	if (ret)
		return ret;
	if (kern_mtu == mtu) {
		priv->mtu = mtu;
		DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
			dev->data->port_id, mtu);
		return 0;
	}
	rte_errno = EAGAIN;
	return -rte_errno;
}

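/*
 * Illustrative sketch (editor's addition, not part of the PMD source): the
 * callback above is reached through rte_eth_dev_set_mtu(), and because the
 * kernel netdev MTU is updated first, a successful call changes the Linux
 * interface as well. Names not defined in this file
 * (MLX5_ETHDEV_USAGE_SKETCH, app_enable_jumbo) are hypothetical and the
 * block is compiled out.
 */
#ifdef MLX5_ETHDEV_USAGE_SKETCH
static int
app_enable_jumbo(uint16_t port_id)
{
	/* 9000-byte MTU; fails if the kernel interface did not accept it. */
	return rte_eth_dev_set_mtu(port_id, 9000);
}
#endif /* MLX5_ETHDEV_USAGE_SKETCH */
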
/**
 * Configure the RX function to use.
 *
 * @param dev
 *   Pointer to private data structure.
 *
 * @return
 *   Pointer to selected Rx burst function.
 */
eth_rx_burst_t
mlx5_select_rx_function(struct rte_eth_dev *dev)
{
	eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;

	MLX5_ASSERT(dev != NULL);
	if (mlx5_check_vec_rx_support(dev) > 0) {
		rx_pkt_burst = mlx5_rx_burst_vec;
		DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
			dev->data->port_id);
	} else if (mlx5_mprq_enabled(dev)) {
		rx_pkt_burst = mlx5_rx_burst_mprq;
	}
	return rx_pkt_burst;
}

/**
 * Get the E-Switch parameters by port id.
 *
 * @param[in] port
 *   Device port id.
 * @param[in] valid
 *   Device port id is valid, skip check. This flag is useful
 *   when trials are performed from probing and device is not
 *   flagged as valid yet (in attaching process).
 * @param[out] es_domain_id
 *   E-Switch domain id.
 * @param[out] es_port_id
 *   The port id of the port in the E-Switch.
 *
 * @return
 *   pointer to device private data structure containing data needed
 *   on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_priv *
mlx5_port_to_eswitch_info(uint16_t port, bool valid)
{
	struct rte_eth_dev *dev;
	struct mlx5_priv *priv;

	if (port >= RTE_MAX_ETHPORTS) {
		rte_errno = EINVAL;
		return NULL;
	}
	if (!valid && !rte_eth_dev_is_valid_port(port)) {
		rte_errno = ENODEV;
		return NULL;
	}
	dev = &rte_eth_devices[port];
	priv = dev->data->dev_private;
	if (!(priv->representor || priv->master)) {
		rte_errno = EINVAL;
		return NULL;
	}
	return priv;
}

/**
 * Get the E-Switch parameters by device instance.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[out] es_domain_id
 *   E-Switch domain id.
 * @param[out] es_port_id
 *   The port id of the port in the E-Switch.
 *
 * @return
 *   pointer to device private data structure containing data needed
 *   on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_priv *
mlx5_dev_to_eswitch_info(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv;

	priv = dev->data->dev_private;
	if (!(priv->representor || priv->master)) {
		rte_errno = EINVAL;
		return NULL;
	}
	return priv;
}

/**
 * DPDK callback to retrieve hairpin capabilities.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] cap
 *   Storage for hairpin capability data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
		     struct rte_eth_hairpin_cap *cap)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->sh->devx == 0) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	cap->max_nb_queues = UINT16_MAX;
	cap->max_rx_2_tx = 1;
	cap->max_tx_2_rx = 1;
	cap->max_nb_desc = 8192;
	return 0;
}

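/*
 * Illustrative sketch (editor's addition, not part of the PMD source): the
 * capabilities above bound what an application may request from
 * rte_eth_rx_hairpin_queue_setup(); max_rx_2_tx/max_tx_2_rx of 1 mean each
 * hairpin Rx queue binds to exactly one peer Tx queue. Names not defined in
 * this file (MLX5_ETHDEV_USAGE_SKETCH, app_setup_hairpin_rxq) are
 * hypothetical and the block is compiled out.
 */
#ifdef MLX5_ETHDEV_USAGE_SKETCH
static int
app_setup_hairpin_rxq(uint16_t port_id, uint16_t queue_id,
		      uint16_t peer_tx_queue)
{
	struct rte_eth_hairpin_conf conf = {
		.peer_count = 1, /* must not exceed cap->max_rx_2_tx */
	};

	conf.peers[0].port = port_id; /* loop back to the same port */
	conf.peers[0].queue = peer_tx_queue;
	/* 128 descriptors; must stay within cap->max_nb_desc. */
	return rte_eth_rx_hairpin_queue_setup(port_id, queue_id, 128, &conf);
}
#endif /* MLX5_ETHDEV_USAGE_SKETCH */
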