1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
17 #include <sys/ioctl.h>
18 #include <sys/socket.h>
19 #include <netinet/in.h>
20 #include <linux/ethtool.h>
21 #include <linux/sockios.h>
27 #include <rte_atomic.h>
28 #include <rte_ethdev_driver.h>
29 #include <rte_bus_pci.h>
31 #include <rte_common.h>
32 #include <rte_interrupts.h>
33 #include <rte_malloc.h>
34 #include <rte_string_fns.h>
35 #include <rte_rwlock.h>
38 #include "mlx5_glue.h"
39 #include "mlx5_rxtx.h"
40 #include "mlx5_utils.h"
/* Fallback definitions of ethtool SUPPORTED_* capability bits for build
 * environments whose kernel headers predate these link modes. Bit positions
 * mirror <linux/ethtool.h>.
 * NOTE(review): the matching #endif lines are not visible in this extract. */
42 /* Supported speed values found in /usr/include/linux/ethtool.h */
43 #ifndef HAVE_SUPPORTED_40000baseKR4_Full
44 #define SUPPORTED_40000baseKR4_Full (1 << 23)
46 #ifndef HAVE_SUPPORTED_40000baseCR4_Full
47 #define SUPPORTED_40000baseCR4_Full (1 << 24)
49 #ifndef HAVE_SUPPORTED_40000baseSR4_Full
50 #define SUPPORTED_40000baseSR4_Full (1 << 25)
52 #ifndef HAVE_SUPPORTED_40000baseLR4_Full
53 #define SUPPORTED_40000baseLR4_Full (1 << 26)
55 #ifndef HAVE_SUPPORTED_56000baseKR4_Full
56 #define SUPPORTED_56000baseKR4_Full (1 << 27)
58 #ifndef HAVE_SUPPORTED_56000baseCR4_Full
59 #define SUPPORTED_56000baseCR4_Full (1 << 28)
61 #ifndef HAVE_SUPPORTED_56000baseSR4_Full
62 #define SUPPORTED_56000baseSR4_Full (1 << 29)
64 #ifndef HAVE_SUPPORTED_56000baseLR4_Full
65 #define SUPPORTED_56000baseLR4_Full (1 << 30)
/* Fallback declaration of struct ethtool_link_settings and the
 * ETHTOOL_LINK_MODE_* bit indexes used by ETHTOOL_GLINKSETTINGS, for kernels
 * whose user headers do not provide them. Bit values match <linux/ethtool.h>.
 * NOTE(review): several struct members and closing braces/#endifs are elided
 * in this extract; the visible tail members are shown below. */
68 /* Add defines in case the running kernel is not the same as user headers. */
69 #ifndef ETHTOOL_GLINKSETTINGS
70 struct ethtool_link_settings {
79 uint8_t eth_tp_mdix_ctrl;
80 int8_t link_mode_masks_nwords; /* Negative on first query; see GLINKSETTINGS handshake. */
82 uint32_t link_mode_masks[]; /* Flexible array: supported/advertising/lp_advertising words. */
85 #define ETHTOOL_GLINKSETTINGS 0x0000004c
86 #define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
87 #define ETHTOOL_LINK_MODE_Autoneg_BIT 6
88 #define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
89 #define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
90 #define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
91 #define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
92 #define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
93 #define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
94 #define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
95 #define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
96 #define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
97 #define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
98 #define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
99 #define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
100 #define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
101 #define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
103 #ifndef HAVE_ETHTOOL_LINK_MODE_25G
104 #define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
105 #define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
106 #define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
108 #ifndef HAVE_ETHTOOL_LINK_MODE_50G
109 #define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
110 #define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
112 #ifndef HAVE_ETHTOOL_LINK_MODE_100G
113 #define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
114 #define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
115 #define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
116 #define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
120 * Get master interface name from the Infiniband device sysfs path.
123 * Pointer to Infiniband device path.
125 * Interface name output buffer.
128 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Resolve the master kernel netdev name by scanning <ibdev_path>/device/net
 * in sysfs. Uses dev_port (or dev_id on kernels < 3.15) to pick the entry.
 * NOTE(review): several interior lines (error paths, fclose, dir handling)
 * are elided in this extract. */
131 mlx5_get_master_ifname(const char *ibdev_path, char (*ifname)[IF_NAMESIZE])
135 unsigned int dev_type = 0;
136 unsigned int dev_port_prev = ~0u; /* Sentinel: no port seen yet. */
137 char match[IF_NAMESIZE] = "";
141 MKSTR(path, "%s/device/net", ibdev_path);
149 while ((dent = readdir(dir)) != NULL) {
150 char *name = dent->d_name;
152 unsigned int dev_port;
/* Skip "." and ".." directory entries. */
155 if ((name[0] == '.') &&
156 ((name[1] == '\0') ||
157 ((name[1] == '.') && (name[2] == '\0'))))
160 MKSTR(path, "%s/device/net/%s/%s",
162 (dev_type ? "dev_id" : "dev_port"));
164 file = fopen(path, "rb");
169 * Switch to dev_id when dev_port does not exist as
170 * is the case with Linux kernel versions < 3.15.
/* dev_id is hex, dev_port is decimal. */
181 r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
186 * Switch to dev_id when dev_port returns the same value for
187 * all ports. May happen when using a MOFED release older than
188 * 3.0 with a Linux kernel >= 3.15.
190 if (dev_port == dev_port_prev)
192 dev_port_prev = dev_port;
/* Remember candidate; presumably only dev_port == 0 reaches here — elided condition, TODO confirm. */
194 strlcpy(match, name, sizeof(match));
197 if (match[0] == '\0') {
201 strncpy(*ifname, match, sizeof(*ifname));
206 * Get interface name from private structure.
208 * This is a port representor-aware version of mlx5_get_master_ifname().
211 * Pointer to Ethernet device.
213 * Interface name output buffer.
216 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Representor-aware interface-name lookup: masters fall back to the sysfs
 * scan in mlx5_get_master_ifname(); representors resolve via the RDMA
 * netlink ifindex and if_indextoname(). Interior error handling is elided
 * in this extract. */
219 mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
221 struct mlx5_priv *priv = dev->data->dev_private;
222 unsigned int ifindex;
/* Query ifindex over the RDMA netlink socket when one is open. */
226 ifindex = priv->nl_socket_rdma >= 0 ?
227 mlx5_nl_ifindex(priv->nl_socket_rdma,
228 priv->sh->ibdev_name,
231 if (!priv->representor)
232 return mlx5_get_master_ifname(priv->sh->ibdev_path,
237 if (if_indextoname(ifindex, &(*ifname)[0]))
244 * Get the interface index from device name.
247 * Pointer to Ethernet device.
250 * Nonzero interface index on success, zero otherwise and rte_errno is set.
/* Translate the device's kernel interface name into an ifindex.
 * Returns nonzero ifindex on success; failure path elided in this extract. */
253 mlx5_ifindex(const struct rte_eth_dev *dev)
255 char ifname[IF_NAMESIZE];
256 unsigned int ifindex;
258 if (mlx5_get_ifname(dev, &ifname))
260 ifindex = if_nametoindex(ifname);
267 * Perform ifreq ioctl() on associated Ethernet device.
270 * Pointer to Ethernet device.
272 * Request number to pass to ioctl().
274 * Interface request structure output buffer.
277 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Run an ifreq ioctl (req) against the device's kernel interface through a
 * throwaway PF_INET datagram socket.
 * NOTE(review): socket error check and close(sock) are elided in this
 * extract — presumably present in the full body; verify. */
280 mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
282 int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
289 ret = mlx5_get_ifname(dev, &ifr->ifr_name);
292 ret = ioctl(sock, req, ifr);
308 * Pointer to Ethernet device.
310 * MTU value output buffer.
313 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Read the kernel interface MTU via SIOCGIFMTU into *mtu. */
316 mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
318 struct ifreq request;
319 int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);
323 *mtu = request.ifr_mtu;
331 * Pointer to Ethernet device.
336 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Set the kernel interface MTU via SIOCSIFMTU. */
339 mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
341 struct ifreq request = { .ifr_mtu = mtu, };
343 return mlx5_ifreq(dev, SIOCSIFMTU, &request);
350 * Pointer to Ethernet device.
352 * Bitmask for flags that must remain untouched.
354 * Bitmask for flags to modify.
357 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Read-modify-write of the interface flags: bits in `keep` are preserved
 * from the current state, the rest are taken from `flags`. */
360 mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
362 struct ifreq request;
363 int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);
367 request.ifr_flags &= keep;
368 request.ifr_flags |= flags & ~keep;
369 return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
373 * DPDK callback for Ethernet device configuration.
376 * Pointer to Ethernet device structure.
379 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* dev_configure callback: validate/install the RSS key, adopt the new Rx/Tx
 * queue counts, size the RSS indirection table, and initialize per-process
 * private data. Error-return lines are elided in this extract. */
382 mlx5_dev_configure(struct rte_eth_dev *dev)
384 struct mlx5_priv *priv = dev->data->dev_private;
385 unsigned int rxqs_n = dev->data->nb_rx_queues;
386 unsigned int txqs_n = dev->data->nb_tx_queues;
389 unsigned int reta_idx_n;
390 const uint8_t use_app_rss_key =
391 !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
/* An application-supplied key must be exactly MLX5_RSS_HASH_KEY_LEN bytes. */
394 if (use_app_rss_key &&
395 (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
396 MLX5_RSS_HASH_KEY_LEN)) {
397 DRV_LOG(ERR, "port %u RSS key len must be %s Bytes long",
398 dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN));
402 priv->rss_conf.rss_key =
403 rte_realloc(priv->rss_conf.rss_key,
404 MLX5_RSS_HASH_KEY_LEN, 0);
405 if (!priv->rss_conf.rss_key) {
406 DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
407 dev->data->port_id, rxqs_n);
/* Copy either the application key or the driver default (selector elided). */
411 memcpy(priv->rss_conf.rss_key,
413 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
414 rss_hash_default_key,
415 MLX5_RSS_HASH_KEY_LEN);
416 priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN;
417 priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
418 priv->rxqs = (void *)dev->data->rx_queues;
419 priv->txqs = (void *)dev->data->tx_queues;
420 if (txqs_n != priv->txqs_n) {
421 DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
422 dev->data->port_id, priv->txqs_n, txqs_n);
423 priv->txqs_n = txqs_n;
425 if (rxqs_n > priv->config.ind_table_max_size) {
426 DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
427 dev->data->port_id, rxqs_n);
431 if (rxqs_n == priv->rxqs_n)
433 DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
434 dev->data->port_id, priv->rxqs_n, rxqs_n);
435 priv->rxqs_n = rxqs_n;
436 /* If the requested number of RX queues is not a power of two, use the
437 * maximum indirection table size for better balancing.
438 * The result is always rounded to the next power of two. */
439 reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
440 priv->config.ind_table_max_size :
442 ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
445 /* When the number of RX queues is not a power of two, the remaining
446 * table entries are padded with reused WQs and hashes are not spread
448 for (i = 0, j = 0; (i != reta_idx_n); ++i) {
449 (*priv->reta_idx)[i] = j;
453 ret = mlx5_proc_priv_init(dev);
460 * Sets default tuning parameters.
463 * Pointer to Ethernet device.
465 * Info structure output buffer.
/* Fill rte_eth_dev_info default port configuration with tuned ring/burst/
 * queue-count values: small rings for low CPU use, larger rings and more
 * queues when the link supports 100G or the app asks for >2 queues. */
468 mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
470 struct mlx5_priv *priv = dev->data->dev_private;
472 /* Minimum CPU utilization. */
473 info->default_rxportconf.ring_size = 256;
474 info->default_txportconf.ring_size = 256;
475 info->default_rxportconf.burst_size = 64;
476 info->default_txportconf.burst_size = 64;
477 if (priv->link_speed_capa & ETH_LINK_SPEED_100G) {
478 info->default_rxportconf.nb_queues = 16;
479 info->default_txportconf.nb_queues = 16;
480 if (dev->data->nb_rx_queues > 2 ||
481 dev->data->nb_tx_queues > 2) {
482 /* Max Throughput. */
483 info->default_rxportconf.ring_size = 2048;
484 info->default_txportconf.ring_size = 2048;
/* Non-100G branch (else-arm header elided in this extract). */
487 info->default_rxportconf.nb_queues = 8;
488 info->default_txportconf.nb_queues = 8;
489 if (dev->data->nb_rx_queues > 2 ||
490 dev->data->nb_tx_queues > 2) {
491 /* Max Throughput. */
492 info->default_rxportconf.ring_size = 4096;
493 info->default_txportconf.ring_size = 4096;
499 * DPDK callback to get information about the device.
502 * Pointer to Ethernet device structure.
504 * Info structure output buffer.
/* dev_infos_get callback: report queue limits (bounded by min(max_cq,
 * max_qp)), offload capabilities, RSS parameters, link speed capabilities,
 * and switch/representor information. */
507 mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
509 struct mlx5_priv *priv = dev->data->dev_private;
510 struct mlx5_dev_config *config = &priv->config;
512 char ifname[IF_NAMESIZE];
514 /* FIXME: we should ask the device for these values. */
515 info->min_rx_bufsize = 32;
516 info->max_rx_pktlen = 65536;
518 * Since we need one CQ per QP, the limit is the minimum number
519 * between the two values.
521 max = RTE_MIN(priv->sh->device_attr.orig_attr.max_cq,
522 priv->sh->device_attr.orig_attr.max_qp);
523 /* If max >= 65535 then max = 0, max_rx_queues is uint16_t. */
526 info->max_rx_queues = max;
527 info->max_tx_queues = max;
528 info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
529 info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
530 info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
531 info->rx_queue_offload_capa);
532 info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
533 if (mlx5_get_ifname(dev, &ifname) == 0)
534 info->if_index = if_nametoindex(ifname);
535 info->reta_size = priv->reta_idx_n ?
536 priv->reta_idx_n : config->ind_table_max_size;
537 info->hash_key_size = MLX5_RSS_HASH_KEY_LEN;
538 info->speed_capa = priv->link_speed_capa;
539 info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
540 mlx5_set_default_params(dev, info);
541 info->switch_info.name = dev->data->name;
542 info->switch_info.domain_id = priv->domain_id;
543 info->switch_info.port_id = priv->representor_id;
544 if (priv->representor) {
/* Look for the master port sharing this rte_device / switch domain. */
545 unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
548 i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
550 struct mlx5_priv *opriv =
551 rte_eth_devices[port_id[i]].data->dev_private;
/* Skip other representors and foreign domains (loop guards elided). */
554 opriv->representor ||
555 opriv->domain_id != priv->domain_id)
558 * Override switch name with that of the master
561 info->switch_info.name = opriv->dev_data->name;
568 * Get firmware version of a device.
571 * Ethernet device port.
573 * String output allocated by caller.
575 * Size of the output string, including terminating null byte.
578 * 0 on success, or the size of the non truncated string if too big.
/* fw_version_get callback: copy the cached ibv firmware version string into
 * the caller's buffer; per the contract above, returns the required size
 * when fw_size is too small (comparison line elided in this extract). */
580 int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
582 struct mlx5_priv *priv = dev->data->dev_private;
583 struct ibv_device_attr *attr = &priv->sh->device_attr.orig_attr;
584 size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1; /* +1 for NUL. */
589 strlcpy(fw_ver, attr->fw_ver, fw_size);
594 * Get supported packet types.
597 * Pointer to Ethernet device structure.
600 * A pointer to the supported Packet types array.
/* dev_supported_ptypes_get callback: return the static packet-type list
 * when one of the mlx5 Rx burst routines is active, otherwise (elided
 * branch) presumably NULL — TODO confirm against full source. */
603 mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
605 static const uint32_t ptypes[] = {
606 /* refers to rxq_cq_to_pkt_type() */
608 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
609 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
610 RTE_PTYPE_L4_NONFRAG,
614 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
615 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
616 RTE_PTYPE_INNER_L4_NONFRAG,
617 RTE_PTYPE_INNER_L4_FRAG,
618 RTE_PTYPE_INNER_L4_TCP,
619 RTE_PTYPE_INNER_L4_UDP,
623 if (dev->rx_pkt_burst == mlx5_rx_burst ||
624 dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
625 dev->rx_pkt_burst == mlx5_rx_burst_vec)
631 * DPDK callback to retrieve physical link information.
634 * Pointer to Ethernet device structure.
636 * Storage for current link status.
639 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Retrieve link status/speed/capabilities with the legacy ETHTOOL_GSET
 * ioctl (pre-GLINKSETTINGS kernels). Populates priv->link_speed_capa and
 * fills *link; inconsistent speed/status readings are treated as
 * "not yet settled" (tail handling elided in this extract). */
642 mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
643 struct rte_eth_link *link)
645 struct mlx5_priv *priv = dev->data->dev_private;
646 struct ethtool_cmd edata = {
647 .cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
650 struct rte_eth_link dev_link;
/* Link is "up" only when the interface is both UP and RUNNING. */
654 ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
656 DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
657 dev->data->port_id, strerror(rte_errno));
660 dev_link = (struct rte_eth_link) {
661 .link_status = ((ifr.ifr_flags & IFF_UP) &&
662 (ifr.ifr_flags & IFF_RUNNING)),
664 ifr = (struct ifreq) {
665 .ifr_data = (void *)&edata,
667 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
670 "port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
671 dev->data->port_id, strerror(rte_errno));
674 link_speed = ethtool_cmd_speed(&edata);
675 if (link_speed == -1)
676 dev_link.link_speed = ETH_SPEED_NUM_NONE;
678 dev_link.link_speed = link_speed;
/* Translate ethtool SUPPORTED_* bits into DPDK speed capability flags. */
679 priv->link_speed_capa = 0;
680 if (edata.supported & SUPPORTED_Autoneg)
681 priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
682 if (edata.supported & (SUPPORTED_1000baseT_Full |
683 SUPPORTED_1000baseKX_Full))
684 priv->link_speed_capa |= ETH_LINK_SPEED_1G;
685 if (edata.supported & SUPPORTED_10000baseKR_Full)
686 priv->link_speed_capa |= ETH_LINK_SPEED_10G;
687 if (edata.supported & (SUPPORTED_40000baseKR4_Full |
688 SUPPORTED_40000baseCR4_Full |
689 SUPPORTED_40000baseSR4_Full |
690 SUPPORTED_40000baseLR4_Full))
691 priv->link_speed_capa |= ETH_LINK_SPEED_40G;
692 dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
693 ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
694 dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
695 ETH_LINK_SPEED_FIXED);
/* Speed and status disagree: the link is still settling. */
696 if (((dev_link.link_speed && !dev_link.link_status) ||
697 (!dev_link.link_speed && dev_link.link_status))) {
706 * Retrieve physical link information (unlocked version using new ioctl).
709 * Pointer to Ethernet device structure.
711 * Storage for current link status.
714 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Retrieve link status/speed/capabilities with the modern
 * ETHTOOL_GLINKSETTINGS ioctl. Uses the two-step handshake: the first call
 * returns a negative link_mode_masks_nwords; the second call, with the
 * negated word count, fetches the supported/advertising/lp mode masks.
 * Only the first 64 supported-mode bits are examined. */
717 mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
718 struct rte_eth_link *link)
721 struct mlx5_priv *priv = dev->data->dev_private;
722 struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
724 struct rte_eth_link dev_link;
728 ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
730 DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
731 dev->data->port_id, strerror(rte_errno));
734 dev_link = (struct rte_eth_link) {
735 .link_status = ((ifr.ifr_flags & IFF_UP) &&
736 (ifr.ifr_flags & IFF_RUNNING)),
/* First GLINKSETTINGS call: kernel reports -nwords in the handshake. */
738 ifr = (struct ifreq) {
739 .ifr_data = (void *)&gcmd,
741 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
744 "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
746 dev->data->port_id, strerror(rte_errno));
749 gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
/* VLA sized for 3 mask sets (supported/advertising/lp_advertising). */
751 alignas(struct ethtool_link_settings)
752 uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
753 sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
754 struct ethtool_link_settings *ecmd = (void *)data;
/* Second call fetches the actual settings plus mode masks. */
757 ifr.ifr_data = (void *)ecmd;
758 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
761 "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
763 dev->data->port_id, strerror(rte_errno));
766 dev_link.link_speed = ecmd->speed;
767 sc = ecmd->link_mode_masks[0] |
768 ((uint64_t)ecmd->link_mode_masks[1] << 32);
/* Map ETHTOOL_LINK_MODE_* bits onto DPDK ETH_LINK_SPEED_* flags. */
769 priv->link_speed_capa = 0;
770 if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
771 priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
772 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
773 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
774 priv->link_speed_capa |= ETH_LINK_SPEED_1G;
775 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
776 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
777 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
778 priv->link_speed_capa |= ETH_LINK_SPEED_10G;
779 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
780 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
781 priv->link_speed_capa |= ETH_LINK_SPEED_20G;
782 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
783 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
784 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
785 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
786 priv->link_speed_capa |= ETH_LINK_SPEED_40G;
787 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
788 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
789 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
790 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
791 priv->link_speed_capa |= ETH_LINK_SPEED_56G;
792 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
793 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
794 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
795 priv->link_speed_capa |= ETH_LINK_SPEED_25G;
796 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
797 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
798 priv->link_speed_capa |= ETH_LINK_SPEED_50G;
799 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
800 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
801 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
802 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
803 priv->link_speed_capa |= ETH_LINK_SPEED_100G;
804 dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
805 ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
806 dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
807 ETH_LINK_SPEED_FIXED);
/* Speed and status disagree: the link is still settling. */
808 if (((dev_link.link_speed && !dev_link.link_status) ||
809 (!dev_link.link_speed && dev_link.link_status))) {
818 * DPDK callback to retrieve physical link information.
821 * Pointer to Ethernet device structure.
822 * @param wait_to_complete
823 * Wait for request completion.
826 * 0 if link status was not updated, positive if it was, a negative errno
827 * value otherwise and rte_errno is set.
/* link_update callback: try the modern GLINKSETTINGS path first, fall back
 * to legacy GSET; optionally retry (until MLX5_LINK_STATUS_TIMEOUT) while
 * the link is settling when wait_to_complete is set. Returns whether the
 * cached link info actually changed. */
830 mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
833 struct rte_eth_link dev_link;
834 time_t start_time = time(NULL);
837 ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
/* Fallback when GLINKSETTINGS is unsupported (guard elided). */
839 ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
842 /* Handle wait to complete situation. */
843 if (wait_to_complete && ret == -EAGAIN) {
844 if (abs((int)difftime(time(NULL), start_time)) <
845 MLX5_LINK_STATUS_TIMEOUT) {
852 } else if (ret < 0) {
855 } while (wait_to_complete);
/* Nonzero return means the link state changed since last query. */
856 ret = !!memcmp(&dev->data->dev_link, &dev_link,
857 sizeof(struct rte_eth_link));
858 dev->data->dev_link = dev_link;
863 * DPDK callback to change the MTU.
866 * Pointer to Ethernet device structure.
871 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* mtu_set callback: push the new MTU to the kernel interface, then read it
 * back to verify the kernel accepted the exact value before updating the
 * PMD state (elided lines presumably store mtu in priv — TODO confirm). */
874 mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
876 struct mlx5_priv *priv = dev->data->dev_private;
877 uint16_t kern_mtu = 0;
880 ret = mlx5_get_mtu(dev, &kern_mtu);
883 /* Set kernel interface MTU first. */
884 ret = mlx5_set_mtu(dev, mtu);
887 ret = mlx5_get_mtu(dev, &kern_mtu);
/* Only treat as success if the read-back matches the request. */
890 if (kern_mtu == mtu) {
892 DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
893 dev->data->port_id, mtu);
901 * DPDK callback to get flow control status.
904 * Pointer to Ethernet device structure.
905 * @param[out] fc_conf
906 * Flow control output buffer.
909 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* flow_ctrl_get callback: query pause parameters via ETHTOOL_GPAUSEPARAM
 * and translate rx/tx pause bits into an RTE_FC_* mode. */
912 mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
915 struct ethtool_pauseparam ethpause = {
916 .cmd = ETHTOOL_GPAUSEPARAM
920 ifr.ifr_data = (void *)&ethpause;
921 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
924 "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
926 dev->data->port_id, strerror(rte_errno));
929 fc_conf->autoneg = ethpause.autoneg;
930 if (ethpause.rx_pause && ethpause.tx_pause)
931 fc_conf->mode = RTE_FC_FULL;
932 else if (ethpause.rx_pause)
933 fc_conf->mode = RTE_FC_RX_PAUSE;
934 else if (ethpause.tx_pause)
935 fc_conf->mode = RTE_FC_TX_PAUSE;
937 fc_conf->mode = RTE_FC_NONE;
942 * DPDK callback to modify flow control parameters.
945 * Pointer to Ethernet device structure.
947 * Flow control parameters.
950 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* flow_ctrl_set callback: map the requested RTE_FC_* mode onto rx/tx pause
 * bits and apply them via ETHTOOL_SPAUSEPARAM. */
953 mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
956 struct ethtool_pauseparam ethpause = {
957 .cmd = ETHTOOL_SPAUSEPARAM
961 ifr.ifr_data = (void *)&ethpause;
962 ethpause.autoneg = fc_conf->autoneg;
963 if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
964 (fc_conf->mode & RTE_FC_RX_PAUSE))
965 ethpause.rx_pause = 1;
967 ethpause.rx_pause = 0;
969 if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
970 (fc_conf->mode & RTE_FC_TX_PAUSE))
971 ethpause.tx_pause = 1;
973 ethpause.tx_pause = 0;
974 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
977 "port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
979 dev->data->port_id, strerror(rte_errno));
986 * Get PCI information from struct ibv_device.
989 * Pointer to Ethernet device structure.
990 * @param[out] pci_addr
991 * PCI bus address output buffer.
994 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Parse PCI_SLOT_NAME from the ibv device's sysfs uevent file to fill
 * *pci_addr (domain:bus:devid.function).
 * NOTE(review): fclose and failure returns are elided in this extract. */
997 mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
998 struct rte_pci_addr *pci_addr)
1002 MKSTR(path, "%s/device/uevent", device->ibdev_path);
1004 file = fopen(path, "rb");
1009 while (fgets(line, sizeof(line), file) == line) {
1010 size_t len = strlen(line);
1013 /* Truncate long lines. */
1014 if (len == (sizeof(line) - 1))
1015 while (line[(len - 1)] != '\n') {
1019 line[(len - 1)] = ret;
1021 /* Extract information. */
1024 "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
1028 &pci_addr->function) == 4) {
1038 * Handle shared asynchronous events the NIC (removal event
1039 * and link status change). Supports multiport IB device.
1042 * Callback argument.
/* Shared asynchronous-event dispatcher for all ports of one IB device:
 * drains the ibv async event queue, maps each event's port_num to the
 * registered rte_eth_dev, and raises LSC/RMV callbacks. Every event is
 * acknowledged exactly once on each path. */
1045 mlx5_dev_interrupt_handler(void *cb_arg)
1047 struct mlx5_ibv_shared *sh = cb_arg;
1048 struct ibv_async_event event;
1050 /* Read all message from the IB device and acknowledge them. */
1052 struct rte_eth_dev *dev;
1055 if (mlx5_glue->get_async_event(sh->ctx, &event))
1057 /* Retrieve and check IB port index. */
1058 tmp = (uint32_t)event.element.port_num;
1059 assert(tmp && (tmp <= sh->max_port));
1061 tmp > sh->max_port ||
1062 sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
1064 * Invalid IB port index or no handler
1065 * installed for this port.
1067 mlx5_glue->ack_async_event(&event);
1070 /* Retrieve ethernet device descriptor. */
1071 tmp = sh->port[tmp - 1].ih_port_id;
1072 dev = &rte_eth_devices[tmp];
/* Link-state change: ack, refresh link info, notify the application. */
1075 if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
1076 event.event_type == IBV_EVENT_PORT_ERR) &&
1077 dev->data->dev_conf.intr_conf.lsc) {
1078 mlx5_glue->ack_async_event(&event);
1079 if (mlx5_link_update(dev, 0) == -EAGAIN) {
1083 _rte_eth_dev_callback_process
1084 (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
/* Device removal: ack and notify the application. */
1087 if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
1088 dev->data->dev_conf.intr_conf.rmv) {
1089 mlx5_glue->ack_async_event(&event);
1090 _rte_eth_dev_callback_process
1091 (dev, RTE_ETH_EVENT_INTR_RMV, NULL);
/* Anything else is logged and acknowledged without action. */
1095 "port %u event type %d on not handled",
1096 dev->data->port_id, event.event_type);
1097 mlx5_glue->ack_async_event(&event);
1102 * Uninstall shared asynchronous device events handler.
1103 * This function is implemented to support event sharing
1104 * between multiple ports of single IB device.
1107 * Pointer to Ethernet device.
/* Deregister this port from the shared IB async event handler; when the
 * last port detaches (intr_cnt reaches zero), unregister the interrupt
 * callback and reset the shared handle. Primary process only; serialized
 * by sh->intr_mutex. Early-exit jumps are elided in this extract. */
1110 mlx5_dev_shared_handler_uninstall(struct rte_eth_dev *dev)
1112 struct mlx5_priv *priv = dev->data->dev_private;
1113 struct mlx5_ibv_shared *sh = priv->sh;
1115 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1117 pthread_mutex_lock(&sh->intr_mutex);
1118 assert(priv->ibv_port);
1119 assert(priv->ibv_port <= sh->max_port);
1120 assert(dev->data->port_id < RTE_MAX_ETHPORTS);
1121 if (sh->port[priv->ibv_port - 1].ih_port_id >= RTE_MAX_ETHPORTS)
1123 assert(sh->port[priv->ibv_port - 1].ih_port_id ==
1124 (uint32_t)dev->data->port_id);
1125 assert(sh->intr_cnt);
/* RTE_MAX_ETHPORTS marks "no handler installed" for this port slot. */
1126 sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
1127 if (!sh->intr_cnt || --sh->intr_cnt)
1129 rte_intr_callback_unregister(&sh->intr_handle,
1130 mlx5_dev_interrupt_handler, sh);
1131 sh->intr_handle.fd = 0;
1132 sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
1134 pthread_mutex_unlock(&sh->intr_mutex);
1138 * Install shared asynchronous device events handler.
1139 * This function is implemented to support event sharing
1140 * between multiple ports of single IB device.
1143 * Pointer to Ethernet device.
/* Register this port with the shared IB async event handler; the first
 * port to attach makes the ibv async fd non-blocking and registers the
 * interrupt callback. On fcntl failure, interrupts are disabled for the
 * port (lsc/rmv cleared). Primary process only; serialized by
 * sh->intr_mutex. Some goto/exit lines are elided in this extract. */
1146 mlx5_dev_shared_handler_install(struct rte_eth_dev *dev)
1148 struct mlx5_priv *priv = dev->data->dev_private;
1149 struct mlx5_ibv_shared *sh = priv->sh;
1153 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1155 pthread_mutex_lock(&sh->intr_mutex);
1156 assert(priv->ibv_port);
1157 assert(priv->ibv_port <= sh->max_port);
1158 assert(dev->data->port_id < RTE_MAX_ETHPORTS);
1159 if (sh->port[priv->ibv_port - 1].ih_port_id < RTE_MAX_ETHPORTS) {
1160 /* The handler is already installed for this port. */
1161 assert(sh->intr_cnt);
/* Record which ethdev owns this IB port slot. */
1164 sh->port[priv->ibv_port - 1].ih_port_id = (uint32_t)dev->data->port_id;
1169 /* No shared handler installed. */
1170 assert(sh->ctx->async_fd > 0);
1171 flags = fcntl(sh->ctx->async_fd, F_GETFL);
1172 ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
1174 DRV_LOG(INFO, "failed to change file descriptor"
1175 " async event queue");
1176 /* Indicate there will be no interrupts. */
1177 dev->data->dev_conf.intr_conf.lsc = 0;
1178 dev->data->dev_conf.intr_conf.rmv = 0;
1179 sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
1182 sh->intr_handle.fd = sh->ctx->async_fd;
1183 sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
1184 rte_intr_callback_register(&sh->intr_handle,
1185 mlx5_dev_interrupt_handler, sh);
1188 pthread_mutex_unlock(&sh->intr_mutex);
1192 * Uninstall interrupt handler.
1195 * Pointer to Ethernet device.
/* Thin wrapper: uninstall the shared async event handler for this port. */
1198 mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
1200 mlx5_dev_shared_handler_uninstall(dev);
1204 * Install interrupt handler.
1207 * Pointer to Ethernet device.
/* Thin wrapper: install the shared async event handler for this port. */
1210 mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
1212 mlx5_dev_shared_handler_install(dev);
1216 * DPDK callback to bring the link DOWN.
1219 * Pointer to Ethernet device structure.
1222 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* dev_set_link_down callback: clear IFF_UP while preserving other flags. */
1225 mlx5_set_link_down(struct rte_eth_dev *dev)
1227 return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
1231 * DPDK callback to bring the link UP.
1234 * Pointer to Ethernet device structure.
1237 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* dev_set_link_up callback: set IFF_UP while preserving other flags. */
1240 mlx5_set_link_up(struct rte_eth_dev *dev)
1242 return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
1246 * Configure the TX function to use.
1249 * Pointer to private data structure.
1252 * Pointer to selected Tx burst function.
/* Select the Tx burst routine based on configured offloads and MPW mode:
 * any TSO/SWP/VLAN-insert offload forces the scalar default; otherwise
 * enhanced MPW may use the (raw) vectorized or empw paths, and legacy MPW
 * uses the inline or plain MPW paths. */
1255 mlx5_select_tx_function(struct rte_eth_dev *dev)
1257 struct mlx5_priv *priv = dev->data->dev_private;
1258 eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst; /* Scalar default. */
1259 struct mlx5_dev_config *config = &priv->config;
1260 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
1261 int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
1262 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1263 DEV_TX_OFFLOAD_GRE_TNL_TSO |
1264 DEV_TX_OFFLOAD_IP_TNL_TSO |
1265 DEV_TX_OFFLOAD_UDP_TNL_TSO));
1266 int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
1267 DEV_TX_OFFLOAD_UDP_TNL_TSO |
1268 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
1269 int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
1271 assert(priv != NULL);
1272 /* Select appropriate TX function. */
1273 if (vlan_insert || tso || swp)
1274 return tx_pkt_burst;
1275 if (config->mps == MLX5_MPW_ENHANCED) {
1276 if (mlx5_check_vec_tx_support(dev) > 0) {
1277 if (mlx5_check_raw_vec_tx_support(dev) > 0)
1278 tx_pkt_burst = mlx5_tx_burst_raw_vec;
1280 tx_pkt_burst = mlx5_tx_burst_vec;
1282 "port %u selected enhanced MPW Tx vectorized"
1284 dev->data->port_id);
1286 tx_pkt_burst = mlx5_tx_burst_empw;
1288 "port %u selected enhanced MPW Tx function",
1289 dev->data->port_id);
1291 } else if (config->mps && (config->txq_inline > 0)) {
1292 tx_pkt_burst = mlx5_tx_burst_mpw_inline;
1293 DRV_LOG(DEBUG, "port %u selected MPW inline Tx function",
1294 dev->data->port_id);
1295 } else if (config->mps) {
1296 tx_pkt_burst = mlx5_tx_burst_mpw;
1297 DRV_LOG(DEBUG, "port %u selected MPW Tx function",
1298 dev->data->port_id);
1300 return tx_pkt_burst;
1304 * Configure the RX function to use.
1307 * Pointer to private data structure.
1310 * Pointer to selected Rx burst function.
/* Select the Rx burst routine: vectorized when supported, else MPRQ when
 * enabled, else the scalar default. */
1313 mlx5_select_rx_function(struct rte_eth_dev *dev)
1315 eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst; /* Scalar default. */
1317 assert(dev != NULL);
1318 if (mlx5_check_vec_rx_support(dev) > 0) {
1319 rx_pkt_burst = mlx5_rx_burst_vec;
1320 DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
1321 dev->data->port_id);
1322 } else if (mlx5_mprq_enabled(dev)) {
1323 rx_pkt_burst = mlx5_rx_burst_mprq;
1325 return rx_pkt_burst;
1329 * Check if mlx5 device was removed.
1332 * Pointer to Ethernet device structure.
1335 * 1 when device is removed, otherwise 0.
/* is_removed callback: probe the device with a query; an EIO result from
 * libibverbs indicates the device has been removed. */
1338 mlx5_is_removed(struct rte_eth_dev *dev)
1340 struct ibv_device_attr device_attr;
1341 struct mlx5_priv *priv = dev->data->dev_private;
1343 if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)
1349 * Get port ID list of mlx5 instances sharing a common device.
1352 * Device to look for.
1353 * @param[out] port_list
1354 * Result buffer for collected port IDs.
1355 * @param port_list_n
1356 * Maximum number of entries in result buffer. If 0, @p port_list can be
1360 * Number of matching instances regardless of the @p port_list_n
1361 * parameter, 0 if none were found.
/* Collect ethdev port IDs attached to the given rte_device into port_list
 * (up to port_list_n entries); returns the total count regardless of the
 * buffer size. List-store line is elided in this extract. */
1364 mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list,
1365 unsigned int port_list_n)
1370 RTE_ETH_FOREACH_DEV_OF(id, dev) {
1371 if (n < port_list_n)
1379 * Get switch information associated with network interface.
1382 * Network interface index.
1384 * Switch information object, populated in case of success.
1387 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Derive switch (E-Switch) info for a netdev from sysfs: reads
 * phys_port_name, phys_switch_id and the presence of a PCI device dir to
 * classify the port as master or representor. fclose/closedir and the
 * final copy to *info are elided in this extract. */
1390 mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
1392 char ifname[IF_NAMESIZE];
1393 char port_name[IF_NAMESIZE];
1395 struct mlx5_switch_info data = {
1403 bool port_name_set = false;
1404 bool port_switch_id_set = false;
1405 bool device_dir = false;
1409 if (!if_indextoname(ifindex, ifname)) {
1414 MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name",
1416 MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id",
1418 MKSTR(pci_device, "/sys/class/net/%s/device",
1421 file = fopen(phys_port_name, "rb");
1423 ret = fscanf(file, "%s", port_name);
1426 port_name_set = mlx5_translate_port_name(port_name,
1429 file = fopen(phys_switch_id, "rb");
/* switch_id must parse as hex followed by a newline to count as set. */
1434 port_switch_id_set =
1435 fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 &&
1438 dir = opendir(pci_device);
/* Master: has switch_id and either no port name or a PCI device dir.
 * Representor: has switch_id and a port name but no PCI device dir. */
1443 data.master = port_switch_id_set && (!port_name_set || device_dir);
1444 data.representor = port_switch_id_set && port_name_set && !device_dir;
1446 assert(!(data.master && data.representor));
1447 if (data.master && data.representor) {
1448 DRV_LOG(ERR, "ifindex %u device is recognized as master"
1449 " and as representor", ifindex);
1457 * Extract port name, as a number, from sysfs or netlink information.
1459 * @param[in] port_name_in
1460 * String representing the port name.
1461 * @param[out] port_info_out
1462 * Port information, including port name as a number.
1465 * true on success, false otherwise.
/* Parse a phys_port_name string into a numeric port name: new-style
 * "pfXvfY" form (kernel >= 5.0) or a plain number (older kernels).
 * Returns true when a port name was recognized. */
1468 mlx5_translate_port_name(const char *port_name_in,
1469 struct mlx5_switch_info *port_info_out)
1471 char pf_c1, pf_c2, vf_c1, vf_c2;
1474 bool port_name_set = false;
1477 * Check for port-name as a string of the form pf0vf0
1478 * (support kernel ver >= 5.0)
1480 port_name_set = (sscanf(port_name_in, "%c%c%d%c%c%d", &pf_c1, &pf_c2,
1481 &pf_num, &vf_c1, &vf_c2,
1482 &port_info_out->port_name) == 6);
1483 if (port_name_set) {
1484 port_info_out->port_name_new = 1;
1486 /* Check for port-name as a number (support kernel ver < 5.0 */
/* strtol must consume the entire string for the legacy form to match. */
1488 port_info_out->port_name = strtol(port_name_in, &end, 0);
1490 (size_t)(end - port_name_in) == strlen(port_name_in))
1491 port_name_set = true;
1493 return port_name_set;