1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
17 #include <sys/ioctl.h>
18 #include <sys/socket.h>
19 #include <netinet/in.h>
20 #include <linux/ethtool.h>
21 #include <linux/sockios.h>
27 #include <rte_atomic.h>
28 #include <rte_ethdev_driver.h>
29 #include <rte_bus_pci.h>
31 #include <rte_common.h>
32 #include <rte_interrupts.h>
33 #include <rte_malloc.h>
34 #include <rte_string_fns.h>
35 #include <rte_rwlock.h>
36 #include <rte_cycles.h>
38 #include <mlx5_glue.h>
39 #include <mlx5_devx_cmds.h>
40 #include <mlx5_common.h>
43 #include "mlx5_rxtx.h"
44 #include "mlx5_utils.h"
46 /* Supported speed values found in /usr/include/linux/ethtool.h */
47 #ifndef HAVE_SUPPORTED_40000baseKR4_Full
48 #define SUPPORTED_40000baseKR4_Full (1 << 23)
50 #ifndef HAVE_SUPPORTED_40000baseCR4_Full
51 #define SUPPORTED_40000baseCR4_Full (1 << 24)
53 #ifndef HAVE_SUPPORTED_40000baseSR4_Full
54 #define SUPPORTED_40000baseSR4_Full (1 << 25)
56 #ifndef HAVE_SUPPORTED_40000baseLR4_Full
57 #define SUPPORTED_40000baseLR4_Full (1 << 26)
59 #ifndef HAVE_SUPPORTED_56000baseKR4_Full
60 #define SUPPORTED_56000baseKR4_Full (1 << 27)
62 #ifndef HAVE_SUPPORTED_56000baseCR4_Full
63 #define SUPPORTED_56000baseCR4_Full (1 << 28)
65 #ifndef HAVE_SUPPORTED_56000baseSR4_Full
66 #define SUPPORTED_56000baseSR4_Full (1 << 29)
68 #ifndef HAVE_SUPPORTED_56000baseLR4_Full
69 #define SUPPORTED_56000baseLR4_Full (1 << 30)
72 /* Add defines in case the running kernel is not the same as user headers. */
73 #ifndef ETHTOOL_GLINKSETTINGS
74 struct ethtool_link_settings {
83 uint8_t eth_tp_mdix_ctrl;
84 int8_t link_mode_masks_nwords;
86 uint32_t link_mode_masks[];
89 /* The kernel values can be found in /include/uapi/linux/ethtool.h */
90 #define ETHTOOL_GLINKSETTINGS 0x0000004c
91 #define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
92 #define ETHTOOL_LINK_MODE_Autoneg_BIT 6
93 #define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
94 #define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
95 #define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
96 #define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
97 #define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
98 #define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
99 #define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
100 #define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
101 #define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
102 #define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
103 #define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
104 #define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
105 #define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
106 #define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
108 #ifndef HAVE_ETHTOOL_LINK_MODE_25G
109 #define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
110 #define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
111 #define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
113 #ifndef HAVE_ETHTOOL_LINK_MODE_50G
114 #define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
115 #define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
117 #ifndef HAVE_ETHTOOL_LINK_MODE_100G
118 #define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
119 #define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
120 #define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
121 #define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
123 #ifndef HAVE_ETHTOOL_LINK_MODE_200G
124 #define ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT 62
125 #define ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT 63
126 #define ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT 0 /* 64 - 64 */
127 #define ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT 1 /* 65 - 64 */
128 #define ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT 2 /* 66 - 64 */
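/*
 * Note: ethtool link mode bits above 63 do not fit in the first 64 bits of a
 * link mode mask, so the 200G bits 64-66 are defined here relative to the
 * second 64-bit word (hence the "64 - 64" style remarks above). For example,
 * kernel bit 66 (200000baseCR4_Full) becomes bit 2 and is tested against
 * link_mode_masks[2]/[3] in mlx5_link_update_unlocked_gs() below.
 */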
132  * Get master interface name from the Infiniband device path.
135  *   Pointer to Infiniband device path.
137 * Interface name output buffer.
140 * 0 on success, a negative errno value otherwise and rte_errno is set.
143 mlx5_get_master_ifname(const char *ibdev_path, char (*ifname)[IF_NAMESIZE])
147 unsigned int dev_type = 0;
148 unsigned int dev_port_prev = ~0u;
149 char match[IF_NAMESIZE] = "";
151 MLX5_ASSERT(ibdev_path);
153 MKSTR(path, "%s/device/net", ibdev_path);
161 while ((dent = readdir(dir)) != NULL) {
162 char *name = dent->d_name;
164 unsigned int dev_port;
167 if ((name[0] == '.') &&
168 ((name[1] == '\0') ||
169 ((name[1] == '.') && (name[2] == '\0'))))
172 MKSTR(path, "%s/device/net/%s/%s",
174 (dev_type ? "dev_id" : "dev_port"));
176 file = fopen(path, "rb");
181 * Switch to dev_id when dev_port does not exist as
182 * is the case with Linux kernel versions < 3.15.
193 r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
198 * Switch to dev_id when dev_port returns the same value for
199 * all ports. May happen when using a MOFED release older than
200 * 3.0 with a Linux kernel >= 3.15.
202 if (dev_port == dev_port_prev)
204 dev_port_prev = dev_port;
206 strlcpy(match, name, sizeof(match));
209 if (match[0] == '\0') {
213 strncpy(*ifname, match, sizeof(*ifname));
218 * Get interface name from private structure.
220 * This is a port representor-aware version of mlx5_get_master_ifname().
223 * Pointer to Ethernet device.
225 * Interface name output buffer.
228 * 0 on success, a negative errno value otherwise and rte_errno is set.
231 mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
233 struct mlx5_priv *priv = dev->data->dev_private;
234 unsigned int ifindex;
237 MLX5_ASSERT(priv->sh);
238 ifindex = mlx5_ifindex(dev);
240 if (!priv->representor)
241 return mlx5_get_master_ifname(priv->sh->ibdev_path,
246 if (if_indextoname(ifindex, &(*ifname)[0]))
253 * Get the interface index from device name.
256 * Pointer to Ethernet device.
259 * Nonzero interface index on success, zero otherwise and rte_errno is set.
262 mlx5_ifindex(const struct rte_eth_dev *dev)
264 struct mlx5_priv *priv = dev->data->dev_private;
265 unsigned int ifindex;
268 MLX5_ASSERT(priv->if_index);
269 ifindex = priv->if_index;
276 * Perform ifreq ioctl() on associated Ethernet device.
279 * Pointer to Ethernet device.
281 * Request number to pass to ioctl().
283 * Interface request structure output buffer.
286 * 0 on success, a negative errno value otherwise and rte_errno is set.
289 mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
291 int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
298 ret = mlx5_get_ifname(dev, &ifr->ifr_name);
301 ret = ioctl(sock, req, ifr);
317 * Pointer to Ethernet device.
319 * MTU value output buffer.
322 * 0 on success, a negative errno value otherwise and rte_errno is set.
325 mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
327 struct ifreq request;
328 int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);
332 *mtu = request.ifr_mtu;
340 * Pointer to Ethernet device.
345 * 0 on success, a negative errno value otherwise and rte_errno is set.
348 mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
350 struct ifreq request = { .ifr_mtu = mtu, };
352 return mlx5_ifreq(dev, SIOCSIFMTU, &request);
359 * Pointer to Ethernet device.
361 * Bitmask for flags that must remain untouched.
363 * Bitmask for flags to modify.
366 * 0 on success, a negative errno value otherwise and rte_errno is set.
369 mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
371 struct ifreq request;
372 int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);
376 request.ifr_flags &= keep;
377 request.ifr_flags |= flags & ~keep;
378 return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
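/*
 * Usage example (illustrative): mlx5_set_flags(dev, ~IFF_UP, IFF_UP)
 * preserves every flag except IFF_UP and then raises IFF_UP, while
 * mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP) clears it; this is exactly how
 * mlx5_set_link_up() and mlx5_set_link_down() below use this helper.
 */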
382 * DPDK callback for Ethernet device configuration.
385 * Pointer to Ethernet device structure.
388 * 0 on success, a negative errno value otherwise and rte_errno is set.
391 mlx5_dev_configure(struct rte_eth_dev *dev)
393 struct mlx5_priv *priv = dev->data->dev_private;
394 unsigned int rxqs_n = dev->data->nb_rx_queues;
395 unsigned int txqs_n = dev->data->nb_tx_queues;
396 const uint8_t use_app_rss_key =
397 !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
400 if (use_app_rss_key &&
401 (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
402 MLX5_RSS_HASH_KEY_LEN)) {
403 DRV_LOG(ERR, "port %u RSS key len must be %s Bytes long",
404 dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN));
408 priv->rss_conf.rss_key =
409 rte_realloc(priv->rss_conf.rss_key,
410 MLX5_RSS_HASH_KEY_LEN, 0);
411 if (!priv->rss_conf.rss_key) {
412 DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
413 dev->data->port_id, rxqs_n);
418 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
419 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
421 memcpy(priv->rss_conf.rss_key,
423 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
424 rss_hash_default_key,
425 MLX5_RSS_HASH_KEY_LEN);
426 priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN;
427 priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
428 priv->rxqs = (void *)dev->data->rx_queues;
429 priv->txqs = (void *)dev->data->tx_queues;
430 if (txqs_n != priv->txqs_n) {
431 DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
432 dev->data->port_id, priv->txqs_n, txqs_n);
433 priv->txqs_n = txqs_n;
435 if (rxqs_n > priv->config.ind_table_max_size) {
436 DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
437 dev->data->port_id, rxqs_n);
441 if (rxqs_n != priv->rxqs_n) {
442 DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
443 dev->data->port_id, priv->rxqs_n, rxqs_n);
444 priv->rxqs_n = rxqs_n;
446 priv->skip_default_rss_reta = 0;
447 ret = mlx5_proc_priv_init(dev);
454 * Configure default RSS reta.
457 * Pointer to Ethernet device structure.
460 * 0 on success, a negative errno value otherwise and rte_errno is set.
463 mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev)
465 struct mlx5_priv *priv = dev->data->dev_private;
466 unsigned int rxqs_n = dev->data->nb_rx_queues;
469 unsigned int reta_idx_n;
471 unsigned int *rss_queue_arr = NULL;
472 unsigned int rss_queue_n = 0;
474 if (priv->skip_default_rss_reta)
476 rss_queue_arr = rte_malloc("", rxqs_n * sizeof(unsigned int), 0);
477 if (!rss_queue_arr) {
478 DRV_LOG(ERR, "port %u cannot allocate RSS queue list (%u)",
479 dev->data->port_id, rxqs_n);
483 for (i = 0, j = 0; i < rxqs_n; i++) {
484 struct mlx5_rxq_data *rxq_data;
485 struct mlx5_rxq_ctrl *rxq_ctrl;
487 rxq_data = (*priv->rxqs)[i];
488 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
489 if (rxq_ctrl && rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
490 rss_queue_arr[j++] = i;
493 if (rss_queue_n > priv->config.ind_table_max_size) {
494 DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
495 dev->data->port_id, rss_queue_n);
497 rte_free(rss_queue_arr);
500 DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
501 dev->data->port_id, priv->rxqs_n, rxqs_n);
502 priv->rxqs_n = rxqs_n;
504 * If the requested number of RX queues is not a power of two,
505 * use the maximum indirection table size for better balancing.
506 * The result is always rounded to the next power of two.
508 reta_idx_n = (1 << log2above((rss_queue_n & (rss_queue_n - 1)) ?
509 priv->config.ind_table_max_size :
511 ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
513 rte_free(rss_queue_arr);
517 * When the number of RX queues is not a power of two,
518 * the remaining table entries are padded with reused WQs
519 * and hashes are not spread uniformly.
521 for (i = 0, j = 0; (i != reta_idx_n); ++i) {
522 (*priv->reta_idx)[i] = rss_queue_arr[j];
523 if (++j == rss_queue_n)
526 rte_free(rss_queue_arr);
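/*
 * Worked example (hypothetical sizes): with rss_queue_n = 6 and
 * ind_table_max_size = 512, reta_idx_n becomes 512 and the loop above fills
 * the table round-robin as 0,1,2,3,4,5,0,1,... so some queues are referenced
 * one extra time and the hash spread is slightly uneven.
 */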
531 * Sets default tuning parameters.
534 * Pointer to Ethernet device.
536 * Info structure output buffer.
539 mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
541 struct mlx5_priv *priv = dev->data->dev_private;
543 /* Minimum CPU utilization. */
544 info->default_rxportconf.ring_size = 256;
545 info->default_txportconf.ring_size = 256;
546 info->default_rxportconf.burst_size = MLX5_RX_DEFAULT_BURST;
547 info->default_txportconf.burst_size = MLX5_TX_DEFAULT_BURST;
548 	if ((priv->link_speed_capa & ETH_LINK_SPEED_200G) ||
549 	    (priv->link_speed_capa & ETH_LINK_SPEED_100G)) {
550 info->default_rxportconf.nb_queues = 16;
551 info->default_txportconf.nb_queues = 16;
552 if (dev->data->nb_rx_queues > 2 ||
553 dev->data->nb_tx_queues > 2) {
554 /* Max Throughput. */
555 info->default_rxportconf.ring_size = 2048;
556 info->default_txportconf.ring_size = 2048;
559 info->default_rxportconf.nb_queues = 8;
560 info->default_txportconf.nb_queues = 8;
561 if (dev->data->nb_rx_queues > 2 ||
562 dev->data->nb_tx_queues > 2) {
563 /* Max Throughput. */
564 info->default_rxportconf.ring_size = 4096;
565 info->default_txportconf.ring_size = 4096;
571 * Sets tx mbuf limiting parameters.
574 * Pointer to Ethernet device.
576 * Info structure output buffer.
579 mlx5_set_txlimit_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
581 struct mlx5_priv *priv = dev->data->dev_private;
582 struct mlx5_dev_config *config = &priv->config;
586 inlen = (config->txq_inline_max == MLX5_ARG_UNSET) ?
587 MLX5_SEND_DEF_INLINE_LEN :
588 (unsigned int)config->txq_inline_max;
589 MLX5_ASSERT(config->txq_inline_min >= 0);
590 inlen = RTE_MAX(inlen, (unsigned int)config->txq_inline_min);
591 inlen = RTE_MIN(inlen, MLX5_WQE_SIZE_MAX +
592 MLX5_ESEG_MIN_INLINE_SIZE -
595 MLX5_WQE_DSEG_SIZE * 2);
596 nb_max = (MLX5_WQE_SIZE_MAX +
597 MLX5_ESEG_MIN_INLINE_SIZE -
601 inlen) / MLX5_WSEG_SIZE;
602 info->tx_desc_lim.nb_seg_max = nb_max;
603 info->tx_desc_lim.nb_mtu_seg_max = nb_max;
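/*
 * In short: the configured inline data length is clamped to what a single
 * WQE can carry, and the WQE space left after that inline part determines
 * the maximum number of mbuf segments advertised per Tx descriptor above.
 */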
607 * DPDK callback to get information about the device.
610 * Pointer to Ethernet device structure.
612 * Info structure output buffer.
615 mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
617 struct mlx5_priv *priv = dev->data->dev_private;
618 struct mlx5_dev_config *config = &priv->config;
621 /* FIXME: we should ask the device for these values. */
622 info->min_rx_bufsize = 32;
623 info->max_rx_pktlen = 65536;
624 info->max_lro_pkt_size = MLX5_MAX_LRO_SIZE;
626 	 * Since we need one CQ per QP, the limit is the smaller of
627 	 * the two values.
629 max = RTE_MIN(priv->sh->device_attr.orig_attr.max_cq,
630 priv->sh->device_attr.orig_attr.max_qp);
631 /* max_rx_queues is uint16_t. */
632 max = RTE_MIN(max, (unsigned int)UINT16_MAX);
633 info->max_rx_queues = max;
634 info->max_tx_queues = max;
635 info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
636 info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
637 info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
638 info->rx_queue_offload_capa);
639 info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
640 info->if_index = mlx5_ifindex(dev);
641 info->reta_size = priv->reta_idx_n ?
642 priv->reta_idx_n : config->ind_table_max_size;
643 info->hash_key_size = MLX5_RSS_HASH_KEY_LEN;
644 info->speed_capa = priv->link_speed_capa;
645 info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
646 mlx5_set_default_params(dev, info);
647 mlx5_set_txlimit_params(dev, info);
648 info->switch_info.name = dev->data->name;
649 info->switch_info.domain_id = priv->domain_id;
650 info->switch_info.port_id = priv->representor_id;
651 if (priv->representor) {
654 if (priv->pf_bond >= 0) {
656 			 * Switch port ID is an opaque value with a
657 			 * driver-defined format. In bonding configurations
658 			 * the PF index is pushed into the upper four bits of
659 			 * the port ID. If we ever get too many representors
660 			 * (more than 4K) or PFs (more than 15) this approach
661 			 * must be reconsidered.
662 if ((info->switch_info.port_id >>
663 MLX5_PORT_ID_BONDING_PF_SHIFT) ||
664 priv->pf_bond > MLX5_PORT_ID_BONDING_PF_MASK) {
665 DRV_LOG(ERR, "can't update switch port ID"
666 " for bonding device");
670 info->switch_info.port_id |=
671 priv->pf_bond << MLX5_PORT_ID_BONDING_PF_SHIFT;
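/*
 * Packing example (assuming MLX5_PORT_ID_BONDING_PF_SHIFT is 12): a
 * representor with port ID 5 on bonding PF 1 is reported as switch port ID
 * 0x1005, i.e. the PF index occupies the upper four bits described above.
 */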
673 MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
674 struct mlx5_priv *opriv =
675 rte_eth_devices[port_id].data->dev_private;
678 opriv->representor ||
679 opriv->sh != priv->sh ||
680 opriv->domain_id != priv->domain_id)
683 * Override switch name with that of the master
686 info->switch_info.name = opriv->dev_data->name;
694  * Get the current raw clock counter of the device.
697 * Pointer to Ethernet device structure.
699 * Current raw clock counter of the device.
702 * 0 if the clock has correctly been read
703 * The value of errno in case of error
706 mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
708 struct mlx5_priv *priv = dev->data->dev_private;
709 struct ibv_context *ctx = priv->sh->ctx;
710 struct ibv_values_ex values;
713 values.comp_mask = IBV_VALUES_MASK_RAW_CLOCK;
714 err = mlx5_glue->query_rt_values_ex(ctx, &values);
716 		DRV_LOG(WARNING, "Could not query the clock!");
719 *clock = values.raw_clock.tv_nsec;
724 * Get firmware version of a device.
727 * Ethernet device port.
729 * String output allocated by caller.
731 * Size of the output string, including terminating null byte.
734  *   0 on success, or the size of the non-truncated string if it is too big.
736 int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
738 struct mlx5_priv *priv = dev->data->dev_private;
739 struct ibv_device_attr *attr = &priv->sh->device_attr.orig_attr;
740 size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1;
745 strlcpy(fw_ver, attr->fw_ver, fw_size);
750 * Get supported packet types.
753 * Pointer to Ethernet device structure.
756 * A pointer to the supported Packet types array.
759 mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
761 static const uint32_t ptypes[] = {
762 /* refers to rxq_cq_to_pkt_type() */
764 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
765 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
766 RTE_PTYPE_L4_NONFRAG,
770 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
771 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
772 RTE_PTYPE_INNER_L4_NONFRAG,
773 RTE_PTYPE_INNER_L4_FRAG,
774 RTE_PTYPE_INNER_L4_TCP,
775 RTE_PTYPE_INNER_L4_UDP,
779 if (dev->rx_pkt_burst == mlx5_rx_burst ||
780 dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
781 dev->rx_pkt_burst == mlx5_rx_burst_vec)
787  * Retrieve the master device for a representor in the same switch domain.
790 * Pointer to representor Ethernet device structure.
793 * Master device structure on success, NULL otherwise.
796 static struct rte_eth_dev *
797 mlx5_find_master_dev(struct rte_eth_dev *dev)
799 struct mlx5_priv *priv;
803 priv = dev->data->dev_private;
804 domain_id = priv->domain_id;
805 MLX5_ASSERT(priv->representor);
806 MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
807 struct mlx5_priv *opriv =
808 rte_eth_devices[port_id].data->dev_private;
811 opriv->domain_id == domain_id &&
812 opriv->sh == priv->sh)
813 return &rte_eth_devices[port_id];
819 * DPDK callback to retrieve physical link information.
822 * Pointer to Ethernet device structure.
824 * Storage for current link status.
827 * 0 on success, a negative errno value otherwise and rte_errno is set.
830 mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
831 struct rte_eth_link *link)
833 struct mlx5_priv *priv = dev->data->dev_private;
834 struct ethtool_cmd edata = {
835 .cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
838 struct rte_eth_link dev_link;
842 ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
844 DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
845 dev->data->port_id, strerror(rte_errno));
848 dev_link = (struct rte_eth_link) {
849 .link_status = ((ifr.ifr_flags & IFF_UP) &&
850 (ifr.ifr_flags & IFF_RUNNING)),
852 ifr = (struct ifreq) {
853 .ifr_data = (void *)&edata,
855 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
857 if (ret == -ENOTSUP && priv->representor) {
858 struct rte_eth_dev *master;
861 			 * For representors we can try to inherit link
862 			 * settings from the master device. Link settings
863 			 * do not make much sense for representors anyway,
864 			 * since they have no physical link. The old
865 			 * kernel drivers supported an emulated settings
866 			 * query for representors, the new ones do not,
867 			 * so this code is kept for compatibility
868 			 * reasons.
870 master = mlx5_find_master_dev(dev);
872 ifr = (struct ifreq) {
873 .ifr_data = (void *)&edata,
875 ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr);
880 "port %u ioctl(SIOCETHTOOL,"
881 " ETHTOOL_GSET) failed: %s",
882 dev->data->port_id, strerror(rte_errno));
886 link_speed = ethtool_cmd_speed(&edata);
887 if (link_speed == -1)
888 dev_link.link_speed = ETH_SPEED_NUM_NONE;
890 dev_link.link_speed = link_speed;
891 priv->link_speed_capa = 0;
892 if (edata.supported & SUPPORTED_Autoneg)
893 priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
894 if (edata.supported & (SUPPORTED_1000baseT_Full |
895 SUPPORTED_1000baseKX_Full))
896 priv->link_speed_capa |= ETH_LINK_SPEED_1G;
897 if (edata.supported & SUPPORTED_10000baseKR_Full)
898 priv->link_speed_capa |= ETH_LINK_SPEED_10G;
899 if (edata.supported & (SUPPORTED_40000baseKR4_Full |
900 SUPPORTED_40000baseCR4_Full |
901 SUPPORTED_40000baseSR4_Full |
902 SUPPORTED_40000baseLR4_Full))
903 priv->link_speed_capa |= ETH_LINK_SPEED_40G;
904 dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
905 ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
906 dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
907 ETH_LINK_SPEED_FIXED);
908 if (((dev_link.link_speed && !dev_link.link_status) ||
909 (!dev_link.link_speed && dev_link.link_status))) {
918 * Retrieve physical link information (unlocked version using new ioctl).
921 * Pointer to Ethernet device structure.
923 * Storage for current link status.
926 * 0 on success, a negative errno value otherwise and rte_errno is set.
929 mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
930 struct rte_eth_link *link)
933 struct mlx5_priv *priv = dev->data->dev_private;
934 struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
936 struct rte_eth_link dev_link;
937 struct rte_eth_dev *master = NULL;
941 ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
943 DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
944 dev->data->port_id, strerror(rte_errno));
947 dev_link = (struct rte_eth_link) {
948 .link_status = ((ifr.ifr_flags & IFF_UP) &&
949 (ifr.ifr_flags & IFF_RUNNING)),
951 ifr = (struct ifreq) {
952 .ifr_data = (void *)&gcmd,
954 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
956 if (ret == -ENOTSUP && priv->representor) {
958 		 * For representors we can try to inherit link
959 		 * settings from the master device. Link settings
960 		 * do not make much sense for representors anyway,
961 		 * since they have no physical link. The old
962 		 * kernel drivers supported an emulated settings
963 		 * query for representors, the new ones do not,
964 		 * so this code is kept for compatibility
965 		 * reasons.
967 master = mlx5_find_master_dev(dev);
969 ifr = (struct ifreq) {
970 .ifr_data = (void *)&gcmd,
972 ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr);
977 "port %u ioctl(SIOCETHTOOL,"
978 " ETHTOOL_GLINKSETTINGS) failed: %s",
979 dev->data->port_id, strerror(rte_errno));
984 gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
986 alignas(struct ethtool_link_settings)
987 uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
988 sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
989 struct ethtool_link_settings *ecmd = (void *)data;
992 ifr.ifr_data = (void *)ecmd;
993 ret = mlx5_ifreq(master ? master : dev, SIOCETHTOOL, &ifr);
996 "port %u ioctl(SIOCETHTOOL,"
997 "ETHTOOL_GLINKSETTINGS) failed: %s",
998 dev->data->port_id, strerror(rte_errno));
1001 dev_link.link_speed = (ecmd->speed == UINT32_MAX) ? ETH_SPEED_NUM_NONE :
1003 sc = ecmd->link_mode_masks[0] |
1004 ((uint64_t)ecmd->link_mode_masks[1] << 32);
1005 priv->link_speed_capa = 0;
1006 if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
1007 priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
1008 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
1009 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
1010 priv->link_speed_capa |= ETH_LINK_SPEED_1G;
1011 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
1012 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
1013 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
1014 priv->link_speed_capa |= ETH_LINK_SPEED_10G;
1015 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
1016 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
1017 priv->link_speed_capa |= ETH_LINK_SPEED_20G;
1018 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
1019 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
1020 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
1021 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
1022 priv->link_speed_capa |= ETH_LINK_SPEED_40G;
1023 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
1024 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
1025 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
1026 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
1027 priv->link_speed_capa |= ETH_LINK_SPEED_56G;
1028 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
1029 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
1030 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
1031 priv->link_speed_capa |= ETH_LINK_SPEED_25G;
1032 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
1033 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
1034 priv->link_speed_capa |= ETH_LINK_SPEED_50G;
1035 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
1036 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
1037 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
1038 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
1039 priv->link_speed_capa |= ETH_LINK_SPEED_100G;
1040 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT) |
1041 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT)))
1042 priv->link_speed_capa |= ETH_LINK_SPEED_200G;
1044 sc = ecmd->link_mode_masks[2] |
1045 ((uint64_t)ecmd->link_mode_masks[3] << 32);
1046 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT) |
1048 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT) |
1049 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT)))
1050 priv->link_speed_capa |= ETH_LINK_SPEED_200G;
1051 dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
1052 ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
1053 dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1054 ETH_LINK_SPEED_FIXED);
1055 if (((dev_link.link_speed && !dev_link.link_status) ||
1056 (!dev_link.link_speed && dev_link.link_status))) {
1065 * DPDK callback to retrieve physical link information.
1068 * Pointer to Ethernet device structure.
1069 * @param wait_to_complete
1070 * Wait for request completion.
1073 * 0 if link status was not updated, positive if it was, a negative errno
1074 * value otherwise and rte_errno is set.
1077 mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1080 struct rte_eth_link dev_link;
1081 time_t start_time = time(NULL);
1082 int retry = MLX5_GET_LINK_STATUS_RETRY_COUNT;
1085 ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
1086 if (ret == -ENOTSUP)
1087 ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
1090 /* Handle wait to complete situation. */
1091 if ((wait_to_complete || retry) && ret == -EAGAIN) {
1092 if (abs((int)difftime(time(NULL), start_time)) <
1093 MLX5_LINK_STATUS_TIMEOUT) {
1100 } else if (ret < 0) {
1103 } while (wait_to_complete || retry-- > 0);
1104 ret = !!memcmp(&dev->data->dev_link, &dev_link,
1105 sizeof(struct rte_eth_link));
1106 dev->data->dev_link = dev_link;
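/*
 * Per the contract above: ret is 1 when the stored link information changed,
 * 0 when it is unchanged, and negative (with rte_errno set) only from the
 * earlier error paths.
 */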
1111 * DPDK callback to change the MTU.
1114 * Pointer to Ethernet device structure.
1119 * 0 on success, a negative errno value otherwise and rte_errno is set.
1122 mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
1124 struct mlx5_priv *priv = dev->data->dev_private;
1125 uint16_t kern_mtu = 0;
1128 ret = mlx5_get_mtu(dev, &kern_mtu);
1131 /* Set kernel interface MTU first. */
1132 ret = mlx5_set_mtu(dev, mtu);
1135 ret = mlx5_get_mtu(dev, &kern_mtu);
1138 if (kern_mtu == mtu) {
1140 DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
1141 dev->data->port_id, mtu);
1149 * DPDK callback to get flow control status.
1152 * Pointer to Ethernet device structure.
1153 * @param[out] fc_conf
1154 * Flow control output buffer.
1157 * 0 on success, a negative errno value otherwise and rte_errno is set.
1160 mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1163 struct ethtool_pauseparam ethpause = {
1164 .cmd = ETHTOOL_GPAUSEPARAM
1168 ifr.ifr_data = (void *)ðpause;
1169 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
1172 "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
1174 dev->data->port_id, strerror(rte_errno));
1177 fc_conf->autoneg = ethpause.autoneg;
1178 if (ethpause.rx_pause && ethpause.tx_pause)
1179 fc_conf->mode = RTE_FC_FULL;
1180 else if (ethpause.rx_pause)
1181 fc_conf->mode = RTE_FC_RX_PAUSE;
1182 else if (ethpause.tx_pause)
1183 fc_conf->mode = RTE_FC_TX_PAUSE;
1185 fc_conf->mode = RTE_FC_NONE;
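/*
 * Mapping summary: rx_pause && tx_pause -> RTE_FC_FULL, rx_pause only ->
 * RTE_FC_RX_PAUSE, tx_pause only -> RTE_FC_TX_PAUSE, otherwise RTE_FC_NONE;
 * mlx5_dev_set_flow_ctrl() below applies the inverse mapping when
 * programming the kernel.
 */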
1190 * DPDK callback to modify flow control parameters.
1193 * Pointer to Ethernet device structure.
1194 * @param[in] fc_conf
1195 * Flow control parameters.
1198 * 0 on success, a negative errno value otherwise and rte_errno is set.
1201 mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1204 struct ethtool_pauseparam ethpause = {
1205 .cmd = ETHTOOL_SPAUSEPARAM
1209 ifr.ifr_data = (void *)ðpause;
1210 ethpause.autoneg = fc_conf->autoneg;
1211 if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
1212 (fc_conf->mode & RTE_FC_RX_PAUSE))
1213 ethpause.rx_pause = 1;
1215 ethpause.rx_pause = 0;
1217 if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
1218 (fc_conf->mode & RTE_FC_TX_PAUSE))
1219 ethpause.tx_pause = 1;
1221 ethpause.tx_pause = 0;
1222 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
1225 "port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
1227 dev->data->port_id, strerror(rte_errno));
1234 * Handle asynchronous removal event for entire multiport device.
1237 * Infiniband device shared context.
1240 mlx5_dev_interrupt_device_fatal(struct mlx5_ibv_shared *sh)
1244 for (i = 0; i < sh->max_port; ++i) {
1245 struct rte_eth_dev *dev;
1247 if (sh->port[i].ih_port_id >= RTE_MAX_ETHPORTS) {
1249 			 * The port does not exist or no handler
1250 			 * is installed for this port.
1254 dev = &rte_eth_devices[sh->port[i].ih_port_id];
1256 if (dev->data->dev_conf.intr_conf.rmv)
1257 _rte_eth_dev_callback_process
1258 (dev, RTE_ETH_EVENT_INTR_RMV, NULL);
1263  * Handle shared asynchronous events from the NIC (removal event
1264  * and link status change). Supports multiport IB devices.
1267 * Callback argument.
1270 mlx5_dev_interrupt_handler(void *cb_arg)
1272 struct mlx5_ibv_shared *sh = cb_arg;
1273 struct ibv_async_event event;
1275 	/* Read all messages from the IB device and acknowledge them. */
1277 struct rte_eth_dev *dev;
1280 if (mlx5_glue->get_async_event(sh->ctx, &event))
1282 /* Retrieve and check IB port index. */
1283 tmp = (uint32_t)event.element.port_num;
1284 if (!tmp && event.event_type == IBV_EVENT_DEVICE_FATAL) {
1286 			 * The DEVICE_FATAL event is reported once for the
1287 			 * entire device without specifying a port.
1288 			 * We should notify all existing ports.
1290 mlx5_glue->ack_async_event(&event);
1291 mlx5_dev_interrupt_device_fatal(sh);
1294 MLX5_ASSERT(tmp && (tmp <= sh->max_port));
1296 			/* Unsupported device-level event. */
1297 mlx5_glue->ack_async_event(&event);
1299 "unsupported common event (type %d)",
1303 if (tmp > sh->max_port) {
1304 /* Invalid IB port index. */
1305 mlx5_glue->ack_async_event(&event);
1307 "cannot handle an event (type %d)"
1308 "due to invalid IB port index (%u)",
1309 event.event_type, tmp);
1312 if (sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
1313 /* No handler installed. */
1314 mlx5_glue->ack_async_event(&event);
1316 "cannot handle an event (type %d)"
1317 "due to no handler installed for port %u",
1318 event.event_type, tmp);
1321 /* Retrieve ethernet device descriptor. */
1322 tmp = sh->port[tmp - 1].ih_port_id;
1323 dev = &rte_eth_devices[tmp];
1325 if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
1326 event.event_type == IBV_EVENT_PORT_ERR) &&
1327 dev->data->dev_conf.intr_conf.lsc) {
1328 mlx5_glue->ack_async_event(&event);
1329 if (mlx5_link_update(dev, 0) == -EAGAIN) {
1333 _rte_eth_dev_callback_process
1334 (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1338 "port %u cannot handle an unknown event (type %d)",
1339 dev->data->port_id, event.event_type);
1340 mlx5_glue->ack_async_event(&event);
1345  * Unregister callback handler safely. The handler may be active
1346  * while we are trying to unregister it; in this case code -EAGAIN
1347  * is returned by rte_intr_callback_unregister(). This routine checks
1348  * the return code and tries to unregister the handler again.
1353 * pointer to callback routine
1355 * opaque callback parameter
1358 mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
1359 rte_intr_callback_fn cb_fn, void *cb_arg)
1362 	 * Try to reduce timeout management overhead by not calling
1363 	 * the timer-related routines on the first iteration. If the
1364 	 * unregistering succeeds on the first call there will be no
1365 	 * timer calls at all.
1373 ret = rte_intr_callback_unregister(handle, cb_fn, cb_arg);
1376 if (ret != -EAGAIN) {
1377 DRV_LOG(INFO, "failed to unregister interrupt"
1378 " handler (error: %d)", ret);
1383 struct timespec onems;
1385 /* Wait one millisecond and try again. */
1387 onems.tv_nsec = NS_PER_S / MS_PER_S;
1388 nanosleep(&onems, 0);
1389 /* Check whether one second elapsed. */
1390 if ((rte_get_timer_cycles() - start) <= twait)
1394 			 * We get the number of timer ticks in one second.
1395 			 * If this amount has elapsed it means we have spent
1396 			 * one second waiting. This branch is executed once,
1397 			 * on the first iteration.
1399 twait = rte_get_timer_hz();
1403 		 * Timeout elapsed, show a message (once a second) and retry.
1404 		 * We have no other acceptable option here: if we ignore
1405 		 * the unregistering return code the handler will not
1406 		 * be unregistered, the fd will be closed and we may get a
1407 		 * crash. Hanging and messaging in the loop seems not to be
1410 DRV_LOG(INFO, "Retrying to unregister interrupt handler");
1411 start = rte_get_timer_cycles();
1416 * Handle DEVX interrupts from the NIC.
1417 * This function is probably called from the DPDK host thread.
1420 * Callback argument.
1423 mlx5_dev_interrupt_handler_devx(void *cb_arg)
1425 #ifndef HAVE_IBV_DEVX_ASYNC
1429 struct mlx5_ibv_shared *sh = cb_arg;
1431 struct mlx5dv_devx_async_cmd_hdr cmd_resp;
1432 uint8_t buf[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
1433 MLX5_ST_SZ_BYTES(traffic_counter) +
1434 sizeof(struct mlx5dv_devx_async_cmd_hdr)];
1436 uint8_t *buf = out.buf + sizeof(out.cmd_resp);
1438 while (!mlx5_glue->devx_get_async_cmd_comp(sh->devx_comp,
1441 mlx5_flow_async_pool_query_handle
1442 (sh, (uint64_t)out.cmd_resp.wr_id,
1443 mlx5_devx_get_out_command_status(buf));
1444 #endif /* HAVE_IBV_DEVX_ASYNC */
1448 * Uninstall shared asynchronous device events handler.
1449 * This function is implemented to support event sharing
1450  * between multiple ports of a single IB device.
1453 * Pointer to Ethernet device.
1456 mlx5_dev_shared_handler_uninstall(struct rte_eth_dev *dev)
1458 struct mlx5_priv *priv = dev->data->dev_private;
1459 struct mlx5_ibv_shared *sh = priv->sh;
1461 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1463 pthread_mutex_lock(&sh->intr_mutex);
1464 MLX5_ASSERT(priv->ibv_port);
1465 MLX5_ASSERT(priv->ibv_port <= sh->max_port);
1466 MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
1467 if (sh->port[priv->ibv_port - 1].ih_port_id >= RTE_MAX_ETHPORTS)
1469 MLX5_ASSERT(sh->port[priv->ibv_port - 1].ih_port_id ==
1470 (uint32_t)dev->data->port_id);
1471 MLX5_ASSERT(sh->intr_cnt);
1472 sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
1473 if (!sh->intr_cnt || --sh->intr_cnt)
1475 mlx5_intr_callback_unregister(&sh->intr_handle,
1476 mlx5_dev_interrupt_handler, sh);
1477 sh->intr_handle.fd = 0;
1478 sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
1480 pthread_mutex_unlock(&sh->intr_mutex);
1484 * Uninstall devx shared asynchronous device events handler.
1485  * This function is implemented to support event sharing
1486  * between multiple ports of a single IB device.
1489 * Pointer to Ethernet device.
1492 mlx5_dev_shared_handler_devx_uninstall(struct rte_eth_dev *dev)
1494 struct mlx5_priv *priv = dev->data->dev_private;
1495 struct mlx5_ibv_shared *sh = priv->sh;
1497 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1499 pthread_mutex_lock(&sh->intr_mutex);
1500 MLX5_ASSERT(priv->ibv_port);
1501 MLX5_ASSERT(priv->ibv_port <= sh->max_port);
1502 MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
1503 if (sh->port[priv->ibv_port - 1].devx_ih_port_id >= RTE_MAX_ETHPORTS)
1505 MLX5_ASSERT(sh->port[priv->ibv_port - 1].devx_ih_port_id ==
1506 (uint32_t)dev->data->port_id);
1507 sh->port[priv->ibv_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
1508 if (!sh->devx_intr_cnt || --sh->devx_intr_cnt)
1510 if (sh->intr_handle_devx.fd) {
1511 rte_intr_callback_unregister(&sh->intr_handle_devx,
1512 mlx5_dev_interrupt_handler_devx,
1514 sh->intr_handle_devx.fd = 0;
1515 sh->intr_handle_devx.type = RTE_INTR_HANDLE_UNKNOWN;
1517 if (sh->devx_comp) {
1518 mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
1519 sh->devx_comp = NULL;
1522 pthread_mutex_unlock(&sh->intr_mutex);
1526 * Install shared asynchronous device events handler.
1527 * This function is implemented to support event sharing
1528  * between multiple ports of a single IB device.
1531 * Pointer to Ethernet device.
1534 mlx5_dev_shared_handler_install(struct rte_eth_dev *dev)
1536 struct mlx5_priv *priv = dev->data->dev_private;
1537 struct mlx5_ibv_shared *sh = priv->sh;
1541 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1543 pthread_mutex_lock(&sh->intr_mutex);
1544 MLX5_ASSERT(priv->ibv_port);
1545 MLX5_ASSERT(priv->ibv_port <= sh->max_port);
1546 MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
1547 if (sh->port[priv->ibv_port - 1].ih_port_id < RTE_MAX_ETHPORTS) {
1548 /* The handler is already installed for this port. */
1549 MLX5_ASSERT(sh->intr_cnt);
1553 sh->port[priv->ibv_port - 1].ih_port_id =
1554 (uint32_t)dev->data->port_id;
1558 /* No shared handler installed. */
1559 MLX5_ASSERT(sh->ctx->async_fd > 0);
1560 flags = fcntl(sh->ctx->async_fd, F_GETFL);
1561 ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
1563 DRV_LOG(INFO, "failed to change file descriptor async event"
1565 /* Indicate there will be no interrupts. */
1566 dev->data->dev_conf.intr_conf.lsc = 0;
1567 dev->data->dev_conf.intr_conf.rmv = 0;
1569 sh->intr_handle.fd = sh->ctx->async_fd;
1570 sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
1571 rte_intr_callback_register(&sh->intr_handle,
1572 mlx5_dev_interrupt_handler, sh);
1574 sh->port[priv->ibv_port - 1].ih_port_id =
1575 (uint32_t)dev->data->port_id;
1578 pthread_mutex_unlock(&sh->intr_mutex);
1582  * Install devx shared asynchronous device events handler.
1583  * This function is implemented to support event sharing
1584  * between multiple ports of a single IB device.
1587 * Pointer to Ethernet device.
1590 mlx5_dev_shared_handler_devx_install(struct rte_eth_dev *dev)
1592 struct mlx5_priv *priv = dev->data->dev_private;
1593 struct mlx5_ibv_shared *sh = priv->sh;
1595 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1597 pthread_mutex_lock(&sh->intr_mutex);
1598 MLX5_ASSERT(priv->ibv_port);
1599 MLX5_ASSERT(priv->ibv_port <= sh->max_port);
1600 MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
1601 if (sh->port[priv->ibv_port - 1].devx_ih_port_id < RTE_MAX_ETHPORTS) {
1602 /* The handler is already installed for this port. */
1603 MLX5_ASSERT(sh->devx_intr_cnt);
1606 if (sh->devx_intr_cnt) {
1607 sh->devx_intr_cnt++;
1608 sh->port[priv->ibv_port - 1].devx_ih_port_id =
1609 (uint32_t)dev->data->port_id;
1612 if (priv->config.devx) {
1613 #ifndef HAVE_IBV_DEVX_ASYNC
1616 sh->devx_comp = mlx5_glue->devx_create_cmd_comp(sh->ctx);
1617 if (sh->devx_comp) {
1618 int flags = fcntl(sh->devx_comp->fd, F_GETFL);
1619 int ret = fcntl(sh->devx_comp->fd, F_SETFL,
1620 flags | O_NONBLOCK);
1623 DRV_LOG(INFO, "failed to change file descriptor"
1624 " devx async event queue");
1626 sh->intr_handle_devx.fd = sh->devx_comp->fd;
1627 sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT;
1628 rte_intr_callback_register
1629 (&sh->intr_handle_devx,
1630 mlx5_dev_interrupt_handler_devx, sh);
1631 sh->devx_intr_cnt++;
1632 sh->port[priv->ibv_port - 1].devx_ih_port_id =
1633 (uint32_t)dev->data->port_id;
1636 #endif /* HAVE_IBV_DEVX_ASYNC */
1639 pthread_mutex_unlock(&sh->intr_mutex);
1643 * Uninstall interrupt handler.
1646 * Pointer to Ethernet device.
1649 mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
1651 mlx5_dev_shared_handler_uninstall(dev);
1655 * Install interrupt handler.
1658 * Pointer to Ethernet device.
1661 mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
1663 mlx5_dev_shared_handler_install(dev);
1667  * Uninstall devx interrupt handler.
1670 * Pointer to Ethernet device.
1673 mlx5_dev_interrupt_handler_devx_uninstall(struct rte_eth_dev *dev)
1675 mlx5_dev_shared_handler_devx_uninstall(dev);
1679  * Install devx interrupt handler.
1682 * Pointer to Ethernet device.
1685 mlx5_dev_interrupt_handler_devx_install(struct rte_eth_dev *dev)
1687 mlx5_dev_shared_handler_devx_install(dev);
1691 * DPDK callback to bring the link DOWN.
1694 * Pointer to Ethernet device structure.
1697 * 0 on success, a negative errno value otherwise and rte_errno is set.
1700 mlx5_set_link_down(struct rte_eth_dev *dev)
1702 return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
1706 * DPDK callback to bring the link UP.
1709 * Pointer to Ethernet device structure.
1712 * 0 on success, a negative errno value otherwise and rte_errno is set.
1715 mlx5_set_link_up(struct rte_eth_dev *dev)
1717 return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
1721 * Configure the RX function to use.
1724  *   Pointer to Ethernet device structure.
1727 * Pointer to selected Rx burst function.
1730 mlx5_select_rx_function(struct rte_eth_dev *dev)
1732 eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;
1734 MLX5_ASSERT(dev != NULL);
1735 if (mlx5_check_vec_rx_support(dev) > 0) {
1736 rx_pkt_burst = mlx5_rx_burst_vec;
1737 DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
1738 dev->data->port_id);
1739 } else if (mlx5_mprq_enabled(dev)) {
1740 rx_pkt_burst = mlx5_rx_burst_mprq;
1742 return rx_pkt_burst;
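/*
 * Selection order: the vectorized burst is preferred when supported, then
 * Multi-Packet RQ (MPRQ), otherwise the scalar mlx5_rx_burst default chosen
 * above is kept.
 */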
1746 * Check if mlx5 device was removed.
1749 * Pointer to Ethernet device structure.
1752 * 1 when device is removed, otherwise 0.
1755 mlx5_is_removed(struct rte_eth_dev *dev)
1757 struct ibv_device_attr device_attr;
1758 struct mlx5_priv *priv = dev->data->dev_private;
1760 if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)
1766 * Get the E-Switch parameters by port id.
1771  *   Flag indicating the port id is already known to be valid, so the
1772  *   check is skipped. This is useful when called from probing, before
1773  *   the device is flagged as valid (while attaching).
1774 * @param[out] es_domain_id
1775 * E-Switch domain id.
1776 * @param[out] es_port_id
1777 * The port id of the port in the E-Switch.
1780 * pointer to device private data structure containing data needed
1781 * on success, NULL otherwise and rte_errno is set.
1784 mlx5_port_to_eswitch_info(uint16_t port, bool valid)
1786 struct rte_eth_dev *dev;
1787 struct mlx5_priv *priv;
1789 if (port >= RTE_MAX_ETHPORTS) {
1793 if (!valid && !rte_eth_dev_is_valid_port(port)) {
1797 dev = &rte_eth_devices[port];
1798 priv = dev->data->dev_private;
1799 if (!(priv->representor || priv->master)) {
1807 * Get the E-Switch parameters by device instance.
1811 * @param[out] es_domain_id
1812 * E-Switch domain id.
1813 * @param[out] es_port_id
1814 * The port id of the port in the E-Switch.
1817 * pointer to device private data structure containing data needed
1818 * on success, NULL otherwise and rte_errno is set.
1821 mlx5_dev_to_eswitch_info(struct rte_eth_dev *dev)
1823 struct mlx5_priv *priv;
1825 priv = dev->data->dev_private;
1826 if (!(priv->representor || priv->master)) {
1834 * Get switch information associated with network interface.
1837 * Network interface index.
1839 * Switch information object, populated in case of success.
1842 * 0 on success, a negative errno value otherwise and rte_errno is set.
1845 mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
1847 char ifname[IF_NAMESIZE];
1848 char port_name[IF_NAMESIZE];
1850 struct mlx5_switch_info data = {
1853 .name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET,
1858 bool port_switch_id_set = false;
1859 bool device_dir = false;
1863 if (!if_indextoname(ifindex, ifname)) {
1868 MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name",
1870 MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id",
1872 MKSTR(pci_device, "/sys/class/net/%s/device",
1875 file = fopen(phys_port_name, "rb");
1877 ret = fscanf(file, "%s", port_name);
1880 mlx5_translate_port_name(port_name, &data);
1882 file = fopen(phys_switch_id, "rb");
1887 port_switch_id_set =
1888 fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 &&
1891 dir = opendir(pci_device);
1896 if (port_switch_id_set) {
1897 /* We have some E-Switch configuration. */
1898 mlx5_sysfs_check_switch_info(device_dir, &data);
1901 MLX5_ASSERT(!(data.master && data.representor));
1902 if (data.master && data.representor) {
1903 DRV_LOG(ERR, "ifindex %u device is recognized as master"
1904 " and as representor", ifindex);
1912 * Analyze gathered port parameters via sysfs to recognize master
1913 * and representor devices for E-Switch configuration.
1915 * @param[in] device_dir
1916  *   Flag indicating the presence of a "device" directory under the port
 *   device key.
1917 * @param[inout] switch_info
1918 * Port information, including port name as a number and port name
1919 * type if recognized
1922 * master and representor flags are set in switch_info according to
1923 * recognized parameters (if any).
1926 mlx5_sysfs_check_switch_info(bool device_dir,
1927 struct mlx5_switch_info *switch_info)
1929 switch (switch_info->name_type) {
1930 case MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN:
1932 * Name is not recognized, assume the master,
1933 * check the device directory presence.
1935 switch_info->master = device_dir;
1937 case MLX5_PHYS_PORT_NAME_TYPE_NOTSET:
1939 		 * Name is not set; this assumes the legacy naming
1940 		 * schema for the master, so just check if there is
1941 		 * a device directory.
1943 switch_info->master = device_dir;
1945 case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
1946 /* New uplink naming schema recognized. */
1947 switch_info->master = 1;
1949 case MLX5_PHYS_PORT_NAME_TYPE_LEGACY:
1950 /* Legacy representors naming schema. */
1951 switch_info->representor = !device_dir;
1953 case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
1954 /* New representors naming schema. */
1955 switch_info->representor = 1;
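/*
 * Example phys_port_name values (typical kernel conventions, shown for
 * illustration): "p0" is recognized as an uplink (master), "pf0vf2" as a
 * new-schema representor, a bare number such as "2" as a legacy representor;
 * unrecognized or missing names fall back to checking the "device" directory
 * presence above.
 */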
1961 * DPDK callback to retrieve plug-in module EEPROM information (type and size).
1964 * Pointer to Ethernet device structure.
1965 * @param[out] modinfo
1966 * Storage for plug-in module EEPROM information.
1969 * 0 on success, a negative errno value otherwise and rte_errno is set.
1972 mlx5_get_module_info(struct rte_eth_dev *dev,
1973 struct rte_eth_dev_module_info *modinfo)
1975 struct ethtool_modinfo info = {
1976 .cmd = ETHTOOL_GMODULEINFO,
1978 struct ifreq ifr = (struct ifreq) {
1979 .ifr_data = (void *)&info,
1983 if (!dev || !modinfo) {
1984 DRV_LOG(WARNING, "missing argument, cannot get module info");
1988 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
1990 DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL) failed: %s",
1991 dev->data->port_id, strerror(rte_errno));
1994 modinfo->type = info.type;
1995 modinfo->eeprom_len = info.eeprom_len;
2000 * DPDK callback to retrieve plug-in module EEPROM data.
2003 * Pointer to Ethernet device structure.
2005 * Storage for plug-in module EEPROM data.
2008 * 0 on success, a negative errno value otherwise and rte_errno is set.
2010 int mlx5_get_module_eeprom(struct rte_eth_dev *dev,
2011 struct rte_dev_eeprom_info *info)
2013 struct ethtool_eeprom *eeprom;
2017 if (!dev || !info) {
2018 DRV_LOG(WARNING, "missing argument, cannot get module eeprom");
2022 eeprom = rte_calloc(__func__, 1,
2023 (sizeof(struct ethtool_eeprom) + info->length), 0);
2025 DRV_LOG(WARNING, "port %u cannot allocate memory for "
2026 "eeprom data", dev->data->port_id);
2030 eeprom->cmd = ETHTOOL_GMODULEEEPROM;
2031 eeprom->offset = info->offset;
2032 eeprom->len = info->length;
2033 ifr = (struct ifreq) {
2034 .ifr_data = (void *)eeprom,
2036 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
2038 DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL) failed: %s",
2039 dev->data->port_id, strerror(rte_errno));
2041 rte_memcpy(info->data, eeprom->data, info->length);
2047 * DPDK callback to retrieve hairpin capabilities.
2050 * Pointer to Ethernet device structure.
2052 * Storage for hairpin capability data.
2055 * 0 on success, a negative errno value otherwise and rte_errno is set.
2057 int mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
2058 struct rte_eth_hairpin_cap *cap)
2060 struct mlx5_priv *priv = dev->data->dev_private;
2062 if (priv->sh->devx == 0) {
2063 rte_errno = ENOTSUP;
2066 cap->max_nb_queues = UINT16_MAX;
2067 cap->max_rx_2_tx = 1;
2068 cap->max_tx_2_rx = 1;
2069 cap->max_nb_desc = 8192;