net/mlx5: allow LRO per Rx queue
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 90488af..9629cfb 100644
@@ -3,12 +3,11 @@
  * Copyright 2015 Mellanox Technologies, Ltd
  */
 
-#define _GNU_SOURCE
-
 #include <stddef.h>
 #include <assert.h>
 #include <inttypes.h>
 #include <unistd.h>
+#include <stdbool.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <string.h>
 #include <rte_malloc.h>
 #include <rte_string_fns.h>
 #include <rte_rwlock.h>
+#include <rte_cycles.h>
 
 #include "mlx5.h"
 #include "mlx5_glue.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_utils.h"
 
+/* Supported speed values found in /usr/include/linux/ethtool.h */
+#ifndef HAVE_SUPPORTED_40000baseKR4_Full
+#define SUPPORTED_40000baseKR4_Full (1 << 23)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseCR4_Full
+#define SUPPORTED_40000baseCR4_Full (1 << 24)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseSR4_Full
+#define SUPPORTED_40000baseSR4_Full (1 << 25)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseLR4_Full
+#define SUPPORTED_40000baseLR4_Full (1 << 26)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseKR4_Full
+#define SUPPORTED_56000baseKR4_Full (1 << 27)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseCR4_Full
+#define SUPPORTED_56000baseCR4_Full (1 << 28)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseSR4_Full
+#define SUPPORTED_56000baseSR4_Full (1 << 29)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseLR4_Full
+#define SUPPORTED_56000baseLR4_Full (1 << 30)
+#endif
+
 /* Add defines in case the running kernel is not the same as user headers. */
 #ifndef ETHTOOL_GLINKSETTINGS
 struct ethtool_link_settings {
@@ -93,7 +119,7 @@ struct ethtool_link_settings {
 #endif
 
 /**
- * Get interface name from private structure.
+ * Get master interface name from the IB device path.
  *
  * @param[in] dev
  *   Pointer to Ethernet device.
@@ -104,17 +130,17 @@ struct ethtool_link_settings {
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
+mlx5_get_master_ifname(const char *ibdev_path, char (*ifname)[IF_NAMESIZE])
 {
-       struct priv *priv = dev->data->dev_private;
        DIR *dir;
        struct dirent *dent;
        unsigned int dev_type = 0;
        unsigned int dev_port_prev = ~0u;
        char match[IF_NAMESIZE] = "";
 
+       assert(ibdev_path);
        {
-               MKSTR(path, "%s/device/net", priv->ibdev_path);
+               MKSTR(path, "%s/device/net", ibdev_path);
 
                dir = opendir(path);
                if (dir == NULL) {
@@ -134,7 +160,7 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
                        continue;
 
                MKSTR(path, "%s/device/net/%s/%s",
-                     priv->ibdev_path, name,
+                     ibdev_path, name,
                      (dev_type ? "dev_id" : "dev_port"));
 
                file = fopen(path, "rb");
@@ -166,7 +192,7 @@ try_dev_id:
                if (dev_port == dev_port_prev)
                        goto try_dev_id;
                dev_port_prev = dev_port;
-               if (dev_port == (priv->port - 1u))
+               if (dev_port == 0)
                        strlcpy(match, name, sizeof(match));
        }
        closedir(dir);
@@ -179,30 +205,61 @@ try_dev_id:
 }
 
 /**
- * Get the interface index from device name.
+ * Get interface name from private structure.
+ *
+ * This is a port representor-aware version of mlx5_get_master_ifname().
  *
  * @param[in] dev
  *   Pointer to Ethernet device.
+ * @param[out] ifname
+ *   Interface name output buffer.
  *
  * @return
- *   Interface index on success, a negative errno value otherwise and
- *   rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_ifindex(const struct rte_eth_dev *dev)
+mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
 {
-       char ifname[IF_NAMESIZE];
-       int ret;
-
-       ret = mlx5_get_ifname(dev, &ifname);
-       if (ret)
-               return ret;
-       ret = if_nametoindex(ifname);
-       if (ret == -1) {
-               rte_errno = errno;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       unsigned int ifindex;
+
+       assert(priv);
+       assert(priv->sh);
+       ifindex = mlx5_ifindex(dev);
+       if (!ifindex) {
+               if (!priv->representor)
+                       return mlx5_get_master_ifname(priv->sh->ibdev_path,
+                                                     ifname);
+               rte_errno = ENXIO;
                return -rte_errno;
        }
-       return ret;
+       if (if_indextoname(ifindex, &(*ifname)[0]))
+               return 0;
+       rte_errno = errno;
+       return -rte_errno;
+}
+
+/**
+ * Get the interface index from device name.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   Nonzero interface index on success, zero otherwise and rte_errno is set.
+ */
+unsigned int
+mlx5_ifindex(const struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       unsigned int ifindex;
+
+       assert(priv);
+       assert(priv->if_index);
+       ifindex = priv->if_index;
+       if (!ifindex)
+               rte_errno = ENXIO;
+       return ifindex;
 }
 
 /**
@@ -323,7 +380,7 @@ mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
 int
 mlx5_dev_configure(struct rte_eth_dev *dev)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int rxqs_n = dev->data->nb_rx_queues;
        unsigned int txqs_n = dev->data->nb_tx_queues;
        unsigned int i;
@@ -335,15 +392,15 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 
        if (use_app_rss_key &&
            (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
-            rss_hash_default_key_len)) {
-               DRV_LOG(ERR, "port %u RSS key len must be %zu Bytes long",
-                       dev->data->port_id, rss_hash_default_key_len);
+            MLX5_RSS_HASH_KEY_LEN)) {
+               DRV_LOG(ERR, "port %u RSS key len must be %s Bytes long",
+                       dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN));
                rte_errno = EINVAL;
                return -rte_errno;
        }
        priv->rss_conf.rss_key =
                rte_realloc(priv->rss_conf.rss_key,
-                           rss_hash_default_key_len, 0);
+                           MLX5_RSS_HASH_KEY_LEN, 0);
        if (!priv->rss_conf.rss_key) {
                DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
                        dev->data->port_id, rxqs_n);
@@ -354,8 +411,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
               use_app_rss_key ?
               dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
               rss_hash_default_key,
-              rss_hash_default_key_len);
-       priv->rss_conf.rss_key_len = rss_hash_default_key_len;
+              MLX5_RSS_HASH_KEY_LEN);
+       priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN;
        priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        priv->rxqs = (void *)dev->data->rx_queues;
        priv->txqs = (void *)dev->data->tx_queues;
@@ -370,28 +427,35 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
                rte_errno = EINVAL;
                return -rte_errno;
        }
-       if (rxqs_n == priv->rxqs_n)
-               return 0;
-       DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
-               dev->data->port_id, priv->rxqs_n, rxqs_n);
-       priv->rxqs_n = rxqs_n;
-       /* If the requested number of RX queues is not a power of two, use the
-        * maximum indirection table size for better balancing.
-        * The result is always rounded to the next power of two. */
-       reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
-                                    priv->config.ind_table_max_size :
-                                    rxqs_n));
-       ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
+       if (rxqs_n != priv->rxqs_n) {
+               DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
+                       dev->data->port_id, priv->rxqs_n, rxqs_n);
+               priv->rxqs_n = rxqs_n;
+               /*
+                * If the requested number of RX queues is not a power of two,
+                * use the maximum indirection table size for better balancing.
+                * The result is always rounded to the next power of two.
+                */
+               reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
+                                            priv->config.ind_table_max_size :
+                                            rxqs_n));
+               ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
+               if (ret)
+                       return ret;
+               /*
+                * When the number of RX queues is not a power of two,
+                * the remaining table entries are padded with reused WQs
+                * and hashes are not spread uniformly.
+                */
+               for (i = 0, j = 0; (i != reta_idx_n); ++i) {
+                       (*priv->reta_idx)[i] = j;
+                       if (++j == rxqs_n)
+                               j = 0;
+               }
+       }
+       ret = mlx5_proc_priv_init(dev);
        if (ret)
                return ret;
-       /* When the number of RX queues is not a power of two, the remaining
-        * table entries are padded with reused WQs and hashes are not spread
-        * uniformly. */
-       for (i = 0, j = 0; (i != reta_idx_n); ++i) {
-               (*priv->reta_idx)[i] = j;
-               if (++j == rxqs_n)
-                       j = 0;
-       }
        return 0;
 }
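
A minimal standalone sketch, not part of the upstream patch, illustrating the indirection table fill loop above with hypothetical sizes (8 RETA entries, 6 Rx queues); it shows how the wrap-around reuses the first queues for the padding entries.

#include <stdio.h>

int
main(void)
{
        const unsigned int reta_idx_n = 8; /* hypothetical table size */
        const unsigned int rxqs_n = 6;     /* hypothetical Rx queue count */
        unsigned int reta[8];
        unsigned int i, j;

        /* Same fill pattern as mlx5_dev_configure(): wrap queue indexes. */
        for (i = 0, j = 0; i != reta_idx_n; ++i) {
                reta[i] = j;
                if (++j == rxqs_n)
                        j = 0;
        }
        for (i = 0; i != reta_idx_n; ++i)
                printf("reta[%u] -> queue %u\n", i, reta[i]);
        /* Prints queues 0..5 once, then queues 0 and 1 absorb the padding. */
        return 0;
}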
 
@@ -406,7 +470,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 static void
 mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
        /* Minimum CPU utilization. */
        info->default_rxportconf.ring_size = 256;
@@ -434,6 +498,42 @@ mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
        }
 }
 
+/**
+ * Set Tx mbuf limiting parameters.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[out] info
+ *   Info structure output buffer.
+ */
+static void
+mlx5_set_txlimit_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_config *config = &priv->config;
+       unsigned int inlen;
+       uint16_t nb_max;
+
+       inlen = (config->txq_inline_max == MLX5_ARG_UNSET) ?
+               MLX5_SEND_DEF_INLINE_LEN :
+               (unsigned int)config->txq_inline_max;
+       assert(config->txq_inline_min >= 0);
+       inlen = RTE_MAX(inlen, (unsigned int)config->txq_inline_min);
+       inlen = RTE_MIN(inlen, MLX5_WQE_SIZE_MAX +
+                              MLX5_ESEG_MIN_INLINE_SIZE -
+                              MLX5_WQE_CSEG_SIZE -
+                              MLX5_WQE_ESEG_SIZE -
+                              MLX5_WQE_DSEG_SIZE * 2);
+       nb_max = (MLX5_WQE_SIZE_MAX +
+                 MLX5_ESEG_MIN_INLINE_SIZE -
+                 MLX5_WQE_CSEG_SIZE -
+                 MLX5_WQE_ESEG_SIZE -
+                 MLX5_WQE_DSEG_SIZE -
+                 inlen) / MLX5_WSEG_SIZE;
+       info->tx_desc_lim.nb_seg_max = nb_max;
+       info->tx_desc_lim.nb_mtu_seg_max = nb_max;
+}
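
The segment limit above follows purely from WQE geometry. The sketch below mirrors the same arithmetic with illustrative constants only (they are not the real MLX5_* PRM values) to show how a larger inline setting reduces the per-packet segment budget.

#include <stdio.h>

int
main(void)
{
        /* Illustrative values only; the driver uses MLX5_* PRM constants. */
        const unsigned int wqe_size_max = 512;   /* hypothetical max WQE */
        const unsigned int cseg = 16, eseg = 16; /* control/eth segments */
        const unsigned int dseg = 16, wseg = 16; /* data segment / WQE seg */
        const unsigned int eseg_min_inline = 18; /* minimal inline header */
        unsigned int inlen;

        for (inlen = 18; inlen <= 210; inlen += 64) {
                unsigned int nb_max = (wqe_size_max + eseg_min_inline -
                                       cseg - eseg - dseg - inlen) / wseg;

                printf("inline %3u bytes -> up to %u segments per packet\n",
                       inlen, nb_max);
        }
        return 0;
}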
+
 /**
  * DPDK callback to get information about the device.
  *
@@ -445,10 +545,9 @@ mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 void
 mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        unsigned int max;
-       char ifname[IF_NAMESIZE];
 
        /* FIXME: we should ask the device for these values. */
        info->min_rx_bufsize = 32;
@@ -457,8 +556,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
         * Since we need one CQ per QP, the limit is the minimum number
         * between the two values.
         */
-       max = RTE_MIN(priv->device_attr.orig_attr.max_cq,
-                     priv->device_attr.orig_attr.max_qp);
+       max = RTE_MIN(priv->sh->device_attr.orig_attr.max_cq,
+                     priv->sh->device_attr.orig_attr.max_qp);
        /* If max >= 65535 then max = 0, max_rx_queues is uint16_t. */
        if (max >= 65535)
                max = 65535;
@@ -469,14 +568,94 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
        info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
                                 info->rx_queue_offload_capa);
        info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
-       if (mlx5_get_ifname(dev, &ifname) == 0)
-               info->if_index = if_nametoindex(ifname);
+       info->if_index = mlx5_ifindex(dev);
        info->reta_size = priv->reta_idx_n ?
                priv->reta_idx_n : config->ind_table_max_size;
-       info->hash_key_size = rss_hash_default_key_len;
+       info->hash_key_size = MLX5_RSS_HASH_KEY_LEN;
        info->speed_capa = priv->link_speed_capa;
        info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
        mlx5_set_default_params(dev, info);
+       mlx5_set_txlimit_params(dev, info);
+       info->switch_info.name = dev->data->name;
+       info->switch_info.domain_id = priv->domain_id;
+       info->switch_info.port_id = priv->representor_id;
+       if (priv->representor) {
+               unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
+               uint16_t port_id[i];
+
+               i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
+               while (i--) {
+                       struct mlx5_priv *opriv =
+                               rte_eth_devices[port_id[i]].data->dev_private;
+
+                       if (!opriv ||
+                           opriv->representor ||
+                           opriv->domain_id != priv->domain_id)
+                               continue;
+                       /*
+                        * Override switch name with that of the master
+                        * device.
+                        */
+                       info->switch_info.name = opriv->dev_data->name;
+                       break;
+               }
+       }
+}
+
+/**
+ * Get the current raw clock counter of the device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param[out] clock
+ *   Current raw clock counter of the device.
+ *
+ * @return
+ *   0 if the clock has been read correctly, the value of errno otherwise.
+ */
+int
+mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct ibv_context *ctx = priv->sh->ctx;
+       struct ibv_values_ex values;
+       int err = 0;
+
+       values.comp_mask = IBV_VALUES_MASK_RAW_CLOCK;
+       err = mlx5_glue->query_rt_values_ex(ctx, &values);
+       if (err != 0) {
+               DRV_LOG(WARNING, "Could not query the clock!");
+               return err;
+       }
+       *clock = values.raw_clock.tv_nsec;
+       return 0;
+}
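
A hedged usage sketch: applications reach this callback through the generic ethdev layer, assuming the rte_eth_read_clock() API dispatches here; sample_device_clock() is a hypothetical helper.

#include <inttypes.h>
#include <stdio.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>

/* Hypothetical helper: sample the raw device clock twice and print the
 * number of device ticks elapsed over roughly 10 ms. */
static void
sample_device_clock(uint16_t port_id)
{
        uint64_t t0, t1;

        if (rte_eth_read_clock(port_id, &t0) != 0)
                return;
        rte_delay_ms(10);
        if (rte_eth_read_clock(port_id, &t1) != 0)
                return;
        printf("port %u raw clock advanced by %" PRIu64 " ticks\n",
               port_id, t1 - t0);
}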
+
+/**
+ * Get firmware version of a device.
+ *
+ * @param dev
+ *   Ethernet device port.
+ * @param fw_ver
+ *   String output allocated by caller.
+ * @param fw_size
+ *   Size of the output string, including terminating null byte.
+ *
+ * @return
+ *   0 on success, or the size of the non-truncated string if too big.
+ */
+int
+mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct ibv_device_attr *attr = &priv->sh->device_attr.orig_attr;
+       size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1;
+
+       if (fw_size < size)
+               return size;
+       if (fw_ver != NULL)
+               strlcpy(fw_ver, attr->fw_ver, fw_size);
+       return 0;
 }
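
A minimal usage sketch, not part of the patch: the callback above is reached through rte_eth_dev_fw_version_get(), whose positive return value reports the buffer size that would have been required; print_fw_version() is a hypothetical helper.

#include <stdio.h>

#include <rte_ethdev.h>

/* Hypothetical helper: print the firmware version reported by a port. */
static void
print_fw_version(uint16_t port_id)
{
        char fw_ver[64];
        int ret;

        ret = rte_eth_dev_fw_version_get(port_id, fw_ver, sizeof(fw_ver));
        if (ret == 0)
                printf("port %u firmware: %s\n", port_id, fw_ver);
        else if (ret > 0)
                printf("port %u: buffer too small, %d bytes needed\n",
                       port_id, ret);
        else
                printf("port %u: cannot read firmware version (%d)\n",
                       port_id, ret);
}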
 
 /**
@@ -516,6 +695,36 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
        return NULL;
 }
 
+/**
+ * Retrieve the master device for representor in the same switch domain.
+ *
+ * @param dev
+ *   Pointer to representor Ethernet device structure.
+ *
+ * @return
+ *   Master device structure on success, NULL otherwise.
+ */
+static struct rte_eth_dev *
+mlx5_find_master_dev(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv;
+       uint16_t port_id;
+       uint16_t domain_id;
+
+       priv = dev->data->dev_private;
+       domain_id = priv->domain_id;
+       assert(priv->representor);
+       RTE_ETH_FOREACH_DEV_OF(port_id, dev->device) {
+               priv = rte_eth_devices[port_id].data->dev_private;
+               if (priv &&
+                   priv->master &&
+                   priv->domain_id == domain_id)
+                       return &rte_eth_devices[port_id];
+       }
+       return NULL;
+}
+
 /**
  * DPDK callback to retrieve physical link information.
  *
@@ -531,7 +740,7 @@ static int
 mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
                               struct rte_eth_link *link)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct ethtool_cmd edata = {
                .cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
        };
@@ -546,16 +755,43 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
                        dev->data->port_id, strerror(rte_errno));
                return ret;
        }
-       memset(&dev_link, 0, sizeof(dev_link));
-       dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
-                               (ifr.ifr_flags & IFF_RUNNING));
-       ifr.ifr_data = (void *)&edata;
+       dev_link = (struct rte_eth_link) {
+               .link_status = ((ifr.ifr_flags & IFF_UP) &&
+                               (ifr.ifr_flags & IFF_RUNNING)),
+       };
+       ifr = (struct ifreq) {
+               .ifr_data = (void *)&edata,
+       };
        ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
        if (ret) {
-               DRV_LOG(WARNING,
-                       "port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
-                       dev->data->port_id, strerror(rte_errno));
-               return ret;
+               if (ret == -ENOTSUP && priv->representor) {
+                       struct rte_eth_dev *master;
+
+                       /*
+                        * For representors we can try to inherit link
+                        * settings from the master device. Link settings
+                        * do not make much sense for representors, which
+                        * have no physical link of their own. Old kernel
+                        * drivers supported an emulated settings query
+                        * for representors, the new ones do not, so this
+                        * code is kept for compatibility.
+                        */
+                       master = mlx5_find_master_dev(dev);
+                       if (master) {
+                               ifr = (struct ifreq) {
+                                       .ifr_data = (void *)&edata,
+                               };
+                               ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr);
+                       }
+               }
+               if (ret) {
+                       DRV_LOG(WARNING,
+                               "port %u ioctl(SIOCETHTOOL,"
+                               " ETHTOOL_GSET) failed: %s",
+                               dev->data->port_id, strerror(rte_errno));
+                       return ret;
+               }
        }
        link_speed = ethtool_cmd_speed(&edata);
        if (link_speed == -1)
@@ -579,8 +815,8 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
                                ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
        dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                        ETH_LINK_SPEED_FIXED);
-       if ((dev_link.link_speed && !dev_link.link_status) ||
-           (!dev_link.link_speed && dev_link.link_status)) {
+       if (((dev_link.link_speed && !dev_link.link_status) ||
+            (!dev_link.link_speed && dev_link.link_status))) {
                rte_errno = EAGAIN;
                return -rte_errno;
        }
@@ -604,10 +840,11 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
                             struct rte_eth_link *link)
 
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
        struct ifreq ifr;
        struct rte_eth_link dev_link;
+       struct rte_eth_dev *master = NULL;
        uint64_t sc;
        int ret;
 
@@ -617,17 +854,42 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
                        dev->data->port_id, strerror(rte_errno));
                return ret;
        }
-       memset(&dev_link, 0, sizeof(dev_link));
-       dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
-                               (ifr.ifr_flags & IFF_RUNNING));
-       ifr.ifr_data = (void *)&gcmd;
+       dev_link = (struct rte_eth_link) {
+               .link_status = ((ifr.ifr_flags & IFF_UP) &&
+                               (ifr.ifr_flags & IFF_RUNNING)),
+       };
+       ifr = (struct ifreq) {
+               .ifr_data = (void *)&gcmd,
+       };
        ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
        if (ret) {
-               DRV_LOG(DEBUG,
-                       "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
-                       " failed: %s",
-                       dev->data->port_id, strerror(rte_errno));
-               return ret;
+               if (ret == -ENOTSUP && priv->representor) {
+                       /*
+                        * For representors we can try to inherit link
+                        * settings from the master device. Link settings
+                        * do not make much sense for representors, which
+                        * have no physical link of their own. Old kernel
+                        * drivers supported an emulated settings query
+                        * for representors, the new ones do not, so this
+                        * code is kept for compatibility.
+                        */
+                       master = mlx5_find_master_dev(dev);
+                       if (master) {
+                               ifr = (struct ifreq) {
+                                       .ifr_data = (void *)&gcmd,
+                               };
+                               ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr);
+                       }
+               }
+               if (ret) {
+                       DRV_LOG(DEBUG,
+                               "port %u ioctl(SIOCETHTOOL,"
+                               " ETHTOOL_GLINKSETTINGS) failed: %s",
+                               dev->data->port_id, strerror(rte_errno));
+                       return ret;
+               }
        }
        gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
 
@@ -638,11 +900,11 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
 
        *ecmd = gcmd;
        ifr.ifr_data = (void *)ecmd;
-       ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+       ret = mlx5_ifreq(master ? master : dev, SIOCETHTOOL, &ifr);
        if (ret) {
                DRV_LOG(DEBUG,
-                       "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
-                       " failed: %s",
+                       "port %u ioctl(SIOCETHTOOL,"
+                       " ETHTOOL_GLINKSETTINGS) failed: %s",
                        dev->data->port_id, strerror(rte_errno));
                return ret;
        }
@@ -688,8 +950,8 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
                                ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
        dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                                  ETH_LINK_SPEED_FIXED);
-       if ((dev_link.link_speed && !dev_link.link_status) ||
-           (!dev_link.link_speed && dev_link.link_status)) {
+       if (((dev_link.link_speed && !dev_link.link_status) ||
+            (!dev_link.link_speed && dev_link.link_status))) {
                rte_errno = EAGAIN;
                return -rte_errno;
        }
@@ -718,7 +980,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 
        do {
                ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
-               if (ret)
+               if (ret == -ENOTSUP)
                        ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
                if (ret == 0)
                        break;
@@ -756,7 +1018,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 int
 mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        uint16_t kern_mtu = 0;
        int ret;
 
@@ -918,80 +1180,351 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
 }
 
 /**
- * Device status handler.
+ * Handle an asynchronous removal event for the entire multiport device.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param events
- *   Pointer to event flags holder.
+ * @param sh
+ *   Infiniband device shared context.
+ */
+static void
+mlx5_dev_interrupt_device_fatal(struct mlx5_ibv_shared *sh)
+{
+       uint32_t i;
+
+       for (i = 0; i < sh->max_port; ++i) {
+               struct rte_eth_dev *dev;
+
+               if (sh->port[i].ih_port_id >= RTE_MAX_ETHPORTS) {
+                       /*
+                        * Either the port does not exist or no
+                        * handler is installed for it.
+                        */
+                       continue;
+               }
+               dev = &rte_eth_devices[sh->port[i].ih_port_id];
+               assert(dev);
+               if (dev->data->dev_conf.intr_conf.rmv)
+                       _rte_eth_dev_callback_process
+                               (dev, RTE_ETH_EVENT_INTR_RMV, NULL);
+       }
+}
+
+/**
+ * Handle shared asynchronous events from the NIC (removal event
+ * and link status change). Supports multiport IB devices.
  *
- * @return
- *   Events bitmap of callback process which can be called immediately.
+ * @param cb_arg
+ *   Callback argument.
  */
-static uint32_t
-mlx5_dev_status_handler(struct rte_eth_dev *dev)
+void
+mlx5_dev_interrupt_handler(void *cb_arg)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_ibv_shared *sh = cb_arg;
        struct ibv_async_event event;
-       uint32_t ret = 0;
 
-       if (mlx5_link_update(dev, 0) == -EAGAIN) {
-               usleep(0);
-               return 0;
-       }
-       /* Read all message and acknowledge them. */
+       /* Read all messages from the IB device and acknowledge them. */
        for (;;) {
-               if (mlx5_glue->get_async_event(priv->ctx, &event))
+               struct rte_eth_dev *dev;
+               uint32_t tmp;
+
+               if (mlx5_glue->get_async_event(sh->ctx, &event))
                        break;
-               if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
-                       event.event_type == IBV_EVENT_PORT_ERR) &&
-                       (dev->data->dev_conf.intr_conf.lsc == 1))
-                       ret |= (1 << RTE_ETH_EVENT_INTR_LSC);
-               else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
-                       dev->data->dev_conf.intr_conf.rmv == 1)
-                       ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
-               else
+               /* Retrieve and check IB port index. */
+               tmp = (uint32_t)event.element.port_num;
+               if (!tmp && event.event_type == IBV_EVENT_DEVICE_FATAL) {
+                       /*
+                        * The DEVICE_FATAL event occurs once for the
+                        * entire device without specifying a port.
+                        * We should notify all existing ports.
+                        */
+                       mlx5_glue->ack_async_event(&event);
+                       mlx5_dev_interrupt_device_fatal(sh);
+                       continue;
+               }
+               assert(tmp && (tmp <= sh->max_port));
+               if (!tmp) {
+                       /* Unsupported device-level event. */
+                       mlx5_glue->ack_async_event(&event);
+                       DRV_LOG(DEBUG,
+                               "unsupported common event (type %d)",
+                               event.event_type);
+                       continue;
+               }
+               if (tmp > sh->max_port) {
+                       /* Invalid IB port index. */
+                       mlx5_glue->ack_async_event(&event);
                        DRV_LOG(DEBUG,
-                               "port %u event type %d on not handled",
-                               dev->data->port_id, event.event_type);
+                               "cannot handle an event (type %d)"
+                               " due to invalid IB port index (%u)",
+                               event.event_type, tmp);
+                       continue;
+               }
+               if (sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
+                       /* No handler installed. */
+                       mlx5_glue->ack_async_event(&event);
+                       DRV_LOG(DEBUG,
+                               "cannot handle an event (type %d)"
+                               " due to no handler installed for port %u",
+                               event.event_type, tmp);
+                       continue;
+               }
+               /* Retrieve the Ethernet device descriptor. */
+               tmp = sh->port[tmp - 1].ih_port_id;
+               dev = &rte_eth_devices[tmp];
+               assert(dev);
+               if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
+                    event.event_type == IBV_EVENT_PORT_ERR) &&
+                       dev->data->dev_conf.intr_conf.lsc) {
+                       mlx5_glue->ack_async_event(&event);
+                       if (mlx5_link_update(dev, 0) == -EAGAIN) {
+                               usleep(0);
+                               continue;
+                       }
+                       _rte_eth_dev_callback_process
+                               (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+                       continue;
+               }
+               DRV_LOG(DEBUG,
+                       "port %u cannot handle an unknown event (type %d)",
+                       dev->data->port_id, event.event_type);
                mlx5_glue->ack_async_event(&event);
        }
-       return ret;
+}
+
+/**
+ * Unregister the callback handler safely. The handler may still be
+ * active while we are trying to unregister it; in that case
+ * rte_intr_callback_unregister() returns -EAGAIN. This routine checks
+ * the return code and tries to unregister the handler again.
+ *
+ * @param handle
+ *   Interrupt handle.
+ * @param cb_fn
+ *   Pointer to callback routine.
+ * @param cb_arg
+ *   Opaque callback parameter.
+ */
+void
+mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
+                             rte_intr_callback_fn cb_fn, void *cb_arg)
+{
+       /*
+        * Try to reduce the timeout management overhead by not calling
+        * the timer-related routines on the first iteration. If the
+        * unregistering succeeds on the first call there will be no
+        * timer calls at all.
+        */
+       uint64_t twait = 0;
+       uint64_t start = 0;
+
+       do {
+               int ret;
+
+               ret = rte_intr_callback_unregister(handle, cb_fn, cb_arg);
+               if (ret >= 0)
+                       return;
+               if (ret != -EAGAIN) {
+                       DRV_LOG(INFO, "failed to unregister interrupt"
+                                     " handler (error: %d)", ret);
+                       assert(false);
+                       return;
+               }
+               if (twait) {
+                       struct timespec onems;
+
+                       /* Wait one millisecond and try again. */
+                       onems.tv_sec = 0;
+                       onems.tv_nsec = NS_PER_S / MS_PER_S;
+                       nanosleep(&onems, 0);
+                       /* Check whether one second elapsed. */
+                       if ((rte_get_timer_cycles() - start) <= twait)
+                               continue;
+               } else {
+                       /*
+                        * Get the number of timer ticks in one second.
+                        * If this amount has elapsed it means we spent
+                        * one second waiting. This branch is executed
+                        * once, on the first iteration.
+                        */
+                       twait = rte_get_timer_hz();
+                       assert(twait);
+               }
+               /*
+                * Timeout elapsed, show the message (once a second) and
+                * retry. There is no other acceptable option here: if the
+                * unregister return code were ignored, the handler would
+                * not be unregistered, the fd would be closed and we might
+                * get a crash. Waiting and logging in a loop is the least
+                * bad choice.
+                */
+               DRV_LOG(INFO, "Retrying to unregister interrupt handler");
+               start = rte_get_timer_cycles();
+       } while (true);
 }
 
 /**
- * Handle interrupts from the NIC.
+ * Handle DEVX interrupts from the NIC.
+ * This function is probably called from the DPDK host thread.
  *
- * @param[in] intr_handle
- *   Interrupt handler.
  * @param cb_arg
  *   Callback argument.
  */
 void
-mlx5_dev_interrupt_handler(void *cb_arg)
+mlx5_dev_interrupt_handler_devx(void *cb_arg)
 {
-       struct rte_eth_dev *dev = cb_arg;
-       uint32_t events;
-
-       events = mlx5_dev_status_handler(dev);
-       if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
-               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
-       if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
-               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
+#ifndef HAVE_IBV_DEVX_ASYNC
+       (void)cb_arg;
+       return;
+#else
+       struct mlx5_ibv_shared *sh = cb_arg;
+       union {
+               struct mlx5dv_devx_async_cmd_hdr cmd_resp;
+               uint8_t buf[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+                           MLX5_ST_SZ_BYTES(traffic_counter) +
+                           sizeof(struct mlx5dv_devx_async_cmd_hdr)];
+       } out;
+       uint8_t *buf = out.buf + sizeof(out.cmd_resp);
+
+       while (!mlx5_glue->devx_get_async_cmd_comp(sh->devx_comp,
+                                                  &out.cmd_resp,
+                                                  sizeof(out.buf)))
+               mlx5_flow_async_pool_query_handle
+                       (sh, (uint64_t)out.cmd_resp.wr_id,
+                        mlx5_devx_get_out_command_status(buf));
+#endif /* HAVE_IBV_DEVX_ASYNC */
 }
 
 /**
- * Handle interrupts from the socket.
+ * Uninstall shared asynchronous device events handler.
+ * This function is implemented to support event sharing
+ * between multiple ports of a single IB device.
  *
- * @param cb_arg
- *   Callback argument.
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+static void
+mlx5_dev_shared_handler_uninstall(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_ibv_shared *sh = priv->sh;
+
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return;
+       pthread_mutex_lock(&sh->intr_mutex);
+       assert(priv->ibv_port);
+       assert(priv->ibv_port <= sh->max_port);
+       assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+       if (sh->port[priv->ibv_port - 1].ih_port_id >= RTE_MAX_ETHPORTS)
+               goto exit;
+       assert(sh->port[priv->ibv_port - 1].ih_port_id ==
+                                       (uint32_t)dev->data->port_id);
+       assert(sh->intr_cnt);
+       sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
+       if (!sh->intr_cnt || --sh->intr_cnt)
+               goto exit;
+       mlx5_intr_callback_unregister(&sh->intr_handle,
+                                    mlx5_dev_interrupt_handler, sh);
+       sh->intr_handle.fd = 0;
+       sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+       if (sh->intr_handle_devx.fd) {
+               rte_intr_callback_unregister(&sh->intr_handle_devx,
+                                            mlx5_dev_interrupt_handler_devx,
+                                            sh);
+               sh->intr_handle_devx.fd = 0;
+               sh->intr_handle_devx.type = RTE_INTR_HANDLE_UNKNOWN;
+       }
+       if (sh->devx_comp) {
+               mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
+               sh->devx_comp = NULL;
+       }
+exit:
+       pthread_mutex_unlock(&sh->intr_mutex);
+}
+
+/**
+ * Install shared asynchronous device events handler.
+ * This function is implemented to support event sharing
+ * between multiple ports of a single IB device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
  */
 static void
-mlx5_dev_handler_socket(void *cb_arg)
+mlx5_dev_shared_handler_install(struct rte_eth_dev *dev)
 {
-       struct rte_eth_dev *dev = cb_arg;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_ibv_shared *sh = priv->sh;
+       int ret;
+       int flags;
 
-       mlx5_socket_handle(dev);
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return;
+       pthread_mutex_lock(&sh->intr_mutex);
+       assert(priv->ibv_port);
+       assert(priv->ibv_port <= sh->max_port);
+       assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+       if (sh->port[priv->ibv_port - 1].ih_port_id < RTE_MAX_ETHPORTS) {
+               /* The handler is already installed for this port. */
+               assert(sh->intr_cnt);
+               goto exit;
+       }
+       sh->port[priv->ibv_port - 1].ih_port_id = (uint32_t)dev->data->port_id;
+       if (sh->intr_cnt) {
+               sh->intr_cnt++;
+               goto exit;
+       }
+       /* No shared handler installed. */
+       assert(sh->ctx->async_fd > 0);
+       flags = fcntl(sh->ctx->async_fd, F_GETFL);
+       ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
+       if (ret) {
+               DRV_LOG(INFO, "failed to change the file descriptor of the"
+                             " async event queue");
+               goto error;
+       }
+       sh->intr_handle.fd = sh->ctx->async_fd;
+       sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
+       rte_intr_callback_register(&sh->intr_handle,
+                                  mlx5_dev_interrupt_handler, sh);
+       if (priv->config.devx) {
+#ifndef HAVE_IBV_DEVX_ASYNC
+               goto error_unregister;
+#else
+               sh->devx_comp = mlx5_glue->devx_create_cmd_comp(sh->ctx);
+               if (sh->devx_comp) {
+                       flags = fcntl(sh->devx_comp->fd, F_GETFL);
+                       ret = fcntl(sh->devx_comp->fd, F_SETFL,
+                                   flags | O_NONBLOCK);
+                       if (ret) {
+                               DRV_LOG(INFO, "failed to change the file"
+                                             " descriptor of the devx async"
+                                             " event queue");
+                               goto error_unregister;
+                       }
+                       sh->intr_handle_devx.fd = sh->devx_comp->fd;
+                       sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT;
+                       rte_intr_callback_register
+                               (&sh->intr_handle_devx,
+                                mlx5_dev_interrupt_handler_devx, sh);
+               } else {
+                       DRV_LOG(INFO, "failed to create devx async command "
+                               "completion");
+                       goto error_unregister;
+               }
+#endif /* HAVE_IBV_DEVX_ASYNC */
+       }
+       sh->intr_cnt++;
+       goto exit;
+error_unregister:
+       rte_intr_callback_unregister(&sh->intr_handle,
+                                    mlx5_dev_interrupt_handler, sh);
+error:
+       /* Indicate there will be no interrupts. */
+       dev->data->dev_conf.intr_conf.lsc = 0;
+       dev->data->dev_conf.intr_conf.rmv = 0;
+       sh->intr_handle.fd = 0;
+       sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+       sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
+exit:
+       pthread_mutex_unlock(&sh->intr_mutex);
 }
 
 /**
@@ -1003,19 +1536,7 @@ mlx5_dev_handler_socket(void *cb_arg)
 void
 mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
 {
-       struct priv *priv = dev->data->dev_private;
-
-       if (dev->data->dev_conf.intr_conf.lsc ||
-           dev->data->dev_conf.intr_conf.rmv)
-               rte_intr_callback_unregister(&priv->intr_handle,
-                                            mlx5_dev_interrupt_handler, dev);
-       if (priv->primary_socket)
-               rte_intr_callback_unregister(&priv->intr_handle_socket,
-                                            mlx5_dev_handler_socket, dev);
-       priv->intr_handle.fd = 0;
-       priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
-       priv->intr_handle_socket.fd = 0;
-       priv->intr_handle_socket.type = RTE_INTR_HANDLE_UNKNOWN;
+       mlx5_dev_shared_handler_uninstall(dev);
 }
 
 /**
@@ -1027,38 +1548,7 @@ mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
 void
 mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
 {
-       struct priv *priv = dev->data->dev_private;
-       int ret;
-       int flags;
-
-       assert(priv->ctx->async_fd > 0);
-       flags = fcntl(priv->ctx->async_fd, F_GETFL);
-       ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
-       if (ret) {
-               DRV_LOG(INFO,
-                       "port %u failed to change file descriptor async event"
-                       " queue",
-                       dev->data->port_id);
-               dev->data->dev_conf.intr_conf.lsc = 0;
-               dev->data->dev_conf.intr_conf.rmv = 0;
-       }
-       if (dev->data->dev_conf.intr_conf.lsc ||
-           dev->data->dev_conf.intr_conf.rmv) {
-               priv->intr_handle.fd = priv->ctx->async_fd;
-               priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
-               rte_intr_callback_register(&priv->intr_handle,
-                                          mlx5_dev_interrupt_handler, dev);
-       }
-       ret = mlx5_socket_init(dev);
-       if (ret)
-               DRV_LOG(ERR, "port %u cannot initialise socket: %s",
-                       dev->data->port_id, strerror(rte_errno));
-       else if (priv->primary_socket) {
-               priv->intr_handle_socket.fd = priv->primary_socket;
-               priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
-               rte_intr_callback_register(&priv->intr_handle_socket,
-                                          mlx5_dev_handler_socket, dev);
-       }
+       mlx5_dev_shared_handler_install(dev);
 }
 
 /**
@@ -1091,64 +1581,6 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
        return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
 }
 
-/**
- * Configure the TX function to use.
- *
- * @param dev
- *   Pointer to private data structure.
- *
- * @return
- *   Pointer to selected Tx burst function.
- */
-eth_tx_burst_t
-mlx5_select_tx_function(struct rte_eth_dev *dev)
-{
-       struct priv *priv = dev->data->dev_private;
-       eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
-       struct mlx5_dev_config *config = &priv->config;
-       uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-       int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-                                   DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-                                   DEV_TX_OFFLOAD_GRE_TNL_TSO |
-                                   DEV_TX_OFFLOAD_IP_TNL_TSO |
-                                   DEV_TX_OFFLOAD_UDP_TNL_TSO));
-       int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
-                                   DEV_TX_OFFLOAD_UDP_TNL_TSO |
-                                   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
-       int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
-
-       assert(priv != NULL);
-       /* Select appropriate TX function. */
-       if (vlan_insert || tso || swp)
-               return tx_pkt_burst;
-       if (config->mps == MLX5_MPW_ENHANCED) {
-               if (mlx5_check_vec_tx_support(dev) > 0) {
-                       if (mlx5_check_raw_vec_tx_support(dev) > 0)
-                               tx_pkt_burst = mlx5_tx_burst_raw_vec;
-                       else
-                               tx_pkt_burst = mlx5_tx_burst_vec;
-                       DRV_LOG(DEBUG,
-                               "port %u selected enhanced MPW Tx vectorized"
-                               " function",
-                               dev->data->port_id);
-               } else {
-                       tx_pkt_burst = mlx5_tx_burst_empw;
-                       DRV_LOG(DEBUG,
-                               "port %u selected enhanced MPW Tx function",
-                               dev->data->port_id);
-               }
-       } else if (config->mps && (config->txq_inline > 0)) {
-               tx_pkt_burst = mlx5_tx_burst_mpw_inline;
-               DRV_LOG(DEBUG, "port %u selected MPW inline Tx function",
-                       dev->data->port_id);
-       } else if (config->mps) {
-               tx_pkt_burst = mlx5_tx_burst_mpw;
-               DRV_LOG(DEBUG, "port %u selected MPW Tx function",
-                       dev->data->port_id);
-       }
-       return tx_pkt_burst;
-}
-
 /**
  * Configure the RX function to use.
  *
@@ -1187,9 +1619,311 @@ int
 mlx5_is_removed(struct rte_eth_dev *dev)
 {
        struct ibv_device_attr device_attr;
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
-       if (mlx5_glue->query_device(priv->ctx, &device_attr) == EIO)
+       if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)
                return 1;
        return 0;
 }
+
+/**
+ * Get port ID list of mlx5 instances sharing a common device.
+ *
+ * @param[in] dev
+ *   Device to look for.
+ * @param[out] port_list
+ *   Result buffer for collected port IDs.
+ * @param port_list_n
+ *   Maximum number of entries in result buffer. If 0, @p port_list can be
+ *   NULL.
+ *
+ * @return
+ *   Number of matching instances regardless of the @p port_list_n
+ *   parameter, 0 if none were found.
+ */
+unsigned int
+mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list,
+                   unsigned int port_list_n)
+{
+       uint16_t id;
+       unsigned int n = 0;
+
+       RTE_ETH_FOREACH_DEV_OF(id, dev) {
+               if (n < port_list_n)
+                       port_list[n] = id;
+               n++;
+       }
+       return n;
+}
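
A minimal sketch of the two-call pattern this helper supports, mirroring its use in mlx5_dev_infos_get() above and assuming the driver's internal headers are included as in this file; log_sibling_ports() is a hypothetical name.

/* Hypothetical helper: query the count first, then collect the IDs. */
static void
log_sibling_ports(const struct rte_device *rte_dev)
{
        unsigned int n = mlx5_dev_to_port_id(rte_dev, NULL, 0);

        if (n) {
                uint16_t port_id[n];
                unsigned int i;

                n = RTE_MIN(mlx5_dev_to_port_id(rte_dev, port_id, n), n);
                for (i = 0; i != n; ++i)
                        DRV_LOG(DEBUG, "sibling port %u", port_id[i]);
        }
}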
+
+/**
+ * Get the E-Switch domain id this port belongs to.
+ *
+ * @param[in] port
+ *   Device port id.
+ * @param[out] es_domain_id
+ *   E-Switch domain id.
+ * @param[out] es_port_id
+ *   The port id of the port in the E-Switch.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_port_to_eswitch_info(uint16_t port,
+                         uint16_t *es_domain_id, uint16_t *es_port_id)
+{
+       struct rte_eth_dev *dev;
+       struct mlx5_priv *priv;
+
+       if (port >= RTE_MAX_ETHPORTS) {
+               rte_errno = EINVAL;
+               return -rte_errno;
+       }
+       if (!rte_eth_dev_is_valid_port(port)) {
+               rte_errno = ENODEV;
+               return -rte_errno;
+       }
+       dev = &rte_eth_devices[port];
+       priv = dev->data->dev_private;
+       if (!(priv->representor || priv->master)) {
+               rte_errno = EINVAL;
+               return -rte_errno;
+       }
+       if (es_domain_id)
+               *es_domain_id = priv->domain_id;
+       if (es_port_id)
+               *es_port_id = priv->vport_id;
+       return 0;
+}
+
+/**
+ * Get switch information associated with network interface.
+ *
+ * @param ifindex
+ *   Network interface index.
+ * @param[out] info
+ *   Switch information object, populated in case of success.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
+{
+       char ifname[IF_NAMESIZE];
+       char port_name[IF_NAMESIZE];
+       FILE *file;
+       struct mlx5_switch_info data = {
+               .master = 0,
+               .representor = 0,
+               .name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET,
+               .port_name = 0,
+               .switch_id = 0,
+       };
+       DIR *dir;
+       bool port_switch_id_set = false;
+       bool device_dir = false;
+       char c;
+       int ret;
+
+       if (!if_indextoname(ifindex, ifname)) {
+               rte_errno = errno;
+               return -rte_errno;
+       }
+
+       MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name",
+             ifname);
+       MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id",
+             ifname);
+       MKSTR(pci_device, "/sys/class/net/%s/device",
+             ifname);
+
+       file = fopen(phys_port_name, "rb");
+       if (file != NULL) {
+               ret = fscanf(file, "%s", port_name);
+               fclose(file);
+               if (ret == 1)
+                       mlx5_translate_port_name(port_name, &data);
+       }
+       file = fopen(phys_switch_id, "rb");
+       if (file == NULL) {
+               rte_errno = errno;
+               return -rte_errno;
+       }
+       port_switch_id_set =
+               fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 &&
+               c == '\n';
+       fclose(file);
+       dir = opendir(pci_device);
+       if (dir != NULL) {
+               closedir(dir);
+               device_dir = true;
+       }
+       if (port_switch_id_set) {
+               /* We have some E-Switch configuration. */
+               mlx5_sysfs_check_switch_info(device_dir, &data);
+       }
+       *info = data;
+       assert(!(data.master && data.representor));
+       if (data.master && data.representor) {
+               DRV_LOG(ERR, "ifindex %u device is recognized as master"
+                            " and as representor", ifindex);
+               rte_errno = ENODEV;
+               return -rte_errno;
+       }
+       return 0;
+}
+
+/**
+ * Analyze gathered port parameters via Netlink to recognize master
+ * and representor devices for E-Switch configuration.
+ *
+ * @param[in] num_vf_set
+ *   Flag indicating the presence of the "number of VFs" port attribute.
+ * @param[inout] switch_info
+ *   Port information, including the port name as a number and the port
+ *   name type if recognized.
+ *
+ * @return
+ *   Master and representor flags are set in switch_info according to
+ *   the recognized parameters (if any).
+ */
+void
+mlx5_nl_check_switch_info(bool num_vf_set,
+                         struct mlx5_switch_info *switch_info)
+{
+       switch (switch_info->name_type) {
+       case MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN:
+               /*
+                * Name is not recognized, assume the master,
+                * check the number of VFs key presence.
+                */
+               switch_info->master = num_vf_set;
+               break;
+       case MLX5_PHYS_PORT_NAME_TYPE_NOTSET:
+               /*
+                * Name is not set, this assumes the legacy naming
+                * schema for master, just check if there is a
+                * number of VFs key.
+                */
+               switch_info->master = num_vf_set;
+               break;
+       case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
+               /* New uplink naming schema recognized. */
+               switch_info->master = 1;
+               break;
+       case MLX5_PHYS_PORT_NAME_TYPE_LEGACY:
+               /* Legacy representors naming schema. */
+               switch_info->representor = !num_vf_set;
+               break;
+       case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
+               /* New representors naming schema. */
+               switch_info->representor = 1;
+               break;
+       }
+}
+
+/**
+ * Analyze gathered port parameters via sysfs to recognize master
+ * and representor devices for E-Switch configuration.
+ *
+ * @param[in] device_dir
+ *   Flag indicating the presence of a "device" directory under the port
+ *   device key.
+ * @param[inout] switch_info
+ *   Port information, including the port name as a number and the port
+ *   name type if recognized.
+ *
+ * @return
+ *   Master and representor flags are set in switch_info according to
+ *   the recognized parameters (if any).
+ */
+void
+mlx5_sysfs_check_switch_info(bool device_dir,
+                            struct mlx5_switch_info *switch_info)
+{
+       switch (switch_info->name_type) {
+       case MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN:
+               /*
+                * Name is not recognized, assume the master,
+                * check the device directory presence.
+                */
+               switch_info->master = device_dir;
+               break;
+       case MLX5_PHYS_PORT_NAME_TYPE_NOTSET:
+               /*
+                * Name is not set, this assumes the legacy naming
+                * schema for master, just check if there is
+                * a device directory.
+                */
+               switch_info->master = device_dir;
+               break;
+       case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
+               /* New uplink naming schema recognized. */
+               switch_info->master = 1;
+               break;
+       case MLX5_PHYS_PORT_NAME_TYPE_LEGACY:
+               /* Legacy representors naming schema. */
+               switch_info->representor = !device_dir;
+               break;
+       case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
+               /* New representors naming schema. */
+               switch_info->representor = 1;
+               break;
+       }
+}
+
+/**
+ * Extract port name, as a number, from sysfs or netlink information.
+ *
+ * @param[in] port_name_in
+ *   String representing the port name.
+ * @param[out] port_info_out
+ *   Port information, including the port name as a number and the port
+ *   name type if recognized.
+ *
+ * @return
+ *   port_name field set according to recognized name format.
+ */
+void
+mlx5_translate_port_name(const char *port_name_in,
+                        struct mlx5_switch_info *port_info_out)
+{
+       char pf_c1, pf_c2, vf_c1, vf_c2;
+       char *end;
+       int sc_items;
+
+       /*
+        * Check for port-name as a string of the form pf0vf0
+        * (support kernel ver >= 5.0 or OFED ver >= 4.6).
+        */
+       sc_items = sscanf(port_name_in, "%c%c%d%c%c%d",
+                         &pf_c1, &pf_c2, &port_info_out->pf_num,
+                         &vf_c1, &vf_c2, &port_info_out->port_name);
+       if (sc_items == 6 &&
+           pf_c1 == 'p' && pf_c2 == 'f' &&
+           vf_c1 == 'v' && vf_c2 == 'f') {
+               port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_PFVF;
+               return;
+       }
+       /*
+        * Check for port-name as a string of the form p0
+        * (support kernel ver >= 5.0, or OFED ver >= 4.6).
+        */
+       sc_items = sscanf(port_name_in, "%c%d",
+                         &pf_c1, &port_info_out->port_name);
+       if (sc_items == 2 && pf_c1 == 'p') {
+               port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK;
+               return;
+       }
+       /* Check for port-name as a number (support kernel ver < 5.0). */
+       errno = 0;
+       port_info_out->port_name = strtol(port_name_in, &end, 0);
+       if (!errno &&
+           (size_t)(end - port_name_in) == strlen(port_name_in)) {
+               port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_LEGACY;
+               return;
+       }
+       port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN;
+       return;
+}
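
For reference, a short sketch of how the parser above classifies typical phys_port_name strings; translate_examples() is a hypothetical helper and the expected results follow from the three parsing branches shown.

/* Hypothetical illustration of the three naming schemes handled above. */
static void
translate_examples(void)
{
        struct mlx5_switch_info info = { 0 };

        mlx5_translate_port_name("pf0vf2", &info);
        /* name_type == MLX5_PHYS_PORT_NAME_TYPE_PFVF, pf_num == 0,
         * port_name == 2: kernel >= 5.0 / OFED >= 4.6 representor naming. */
        mlx5_translate_port_name("p0", &info);
        /* name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK, port_name == 0:
         * uplink (master) port naming. */
        mlx5_translate_port_name("2", &info);
        /* name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY, port_name == 2:
         * legacy numeric naming on kernels < 5.0. */
}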