net/mlx5: add flow translate function
[dpdk.git] / drivers / net / mlx5 / mlx5.c
index 3a44b04..7493180 100644 (file)
@@ -13,6 +13,7 @@
 #include <errno.h>
 #include <net/if.h>
 #include <sys/mman.h>
+#include <linux/netlink.h>
 #include <linux/rtnetlink.h>
 
 /* Verbs header. */
@@ -45,6 +46,7 @@
 #include "mlx5_defs.h"
 #include "mlx5_glue.h"
 #include "mlx5_mr.h"
+#include "mlx5_flow.h"
 
 /* Device parameter to enable RX completion queue compression. */
 #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
@@ -91,6 +93,9 @@
 /* Activate Netlink support in VF mode. */
 #define MLX5_VF_NL_EN "vf_nl_en"
 
+/* Select port representors to instantiate. */
+#define MLX5_REPRESENTOR "representor"
+
 #ifndef HAVE_IBV_MLX5_MOD_MPW
 #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
@@ -238,6 +243,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        /* In case mlx5_dev_stop() has not been called. */
        mlx5_dev_interrupt_handler_uninstall(dev);
        mlx5_traffic_disable(dev);
+       mlx5_flow_flush(dev, NULL);
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
        dev->tx_pkt_burst = removed_tx_burst;
@@ -257,7 +263,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                priv->txqs_n = 0;
                priv->txqs = NULL;
        }
-       mlx5_flow_delete_drop_queue(dev);
        mlx5_mprq_free_mp(dev);
        mlx5_mr_release(dev);
        if (priv->pd != NULL) {
@@ -274,8 +279,12 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                mlx5_socket_uninit(dev);
        if (priv->config.vf)
                mlx5_nl_mac_addr_flush(dev);
-       if (priv->nl_socket >= 0)
-               close(priv->nl_socket);
+       if (priv->nl_socket_route >= 0)
+               close(priv->nl_socket_route);
+       if (priv->nl_socket_rdma >= 0)
+               close(priv->nl_socket_rdma);
+       if (priv->mnl_socket)
+               mlx5_nl_flow_socket_destroy(priv->mnl_socket);
        ret = mlx5_hrxq_ibv_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
@@ -304,7 +313,27 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        if (ret)
                DRV_LOG(WARNING, "port %u some flows still remain",
                        dev->data->port_id);
+       if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
+               unsigned int c = 0;
+               unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
+               uint16_t port_id[i];
+
+               i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
+               while (i--) {
+                       struct priv *opriv =
+                               rte_eth_devices[port_id[i]].data->dev_private;
+
+                       if (!opriv ||
+                           opriv->domain_id != priv->domain_id ||
+                           &rte_eth_devices[port_id[i]] == dev)
+                               continue;
+                       ++c;
+               }
+               if (!c)
+                       claim_zero(rte_eth_switch_domain_free(priv->domain_id));
+       }
        memset(priv, 0, sizeof(*priv));
+       priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
 }
 
 const struct eth_dev_ops mlx5_dev_ops = {
@@ -371,6 +400,10 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
        .dev_set_link_down = mlx5_set_link_down,
        .dev_set_link_up = mlx5_set_link_up,
        .dev_close = mlx5_dev_close,
+       .promiscuous_enable = mlx5_promiscuous_enable,
+       .promiscuous_disable = mlx5_promiscuous_disable,
+       .allmulticast_enable = mlx5_allmulticast_enable,
+       .allmulticast_disable = mlx5_allmulticast_disable,
        .link_update = mlx5_link_update,
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
@@ -420,6 +453,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
        struct mlx5_dev_config *config = opaque;
        unsigned long tmp;
 
+       /* No-op, port representors are processed in mlx5_dev_spawn(). */
+       if (!strcmp(MLX5_REPRESENTOR, key))
+               return 0;
        errno = 0;
        tmp = strtoul(val, NULL, 0);
        if (errno) {
@@ -442,7 +478,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
        } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
                config->txqs_inline = tmp;
        } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
-               config->mps = !!tmp ? config->mps : 0;
+               config->mps = !!tmp;
        } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
                config->mpw_hdr_dseg = !!tmp;
        } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
@@ -492,6 +528,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
                MLX5_RX_VEC_EN,
                MLX5_L3_VXLAN_EN,
                MLX5_VF_NL_EN,
+               MLX5_REPRESENTOR,
                NULL,
        };
        struct rte_kvargs *kvlist;
@@ -532,11 +569,13 @@ static struct rte_pci_driver mlx5_driver;
 static void *uar_base;
 
 static int
-find_lower_va_bound(const struct rte_memseg_list *msl __rte_unused,
+find_lower_va_bound(const struct rte_memseg_list *msl,
                const struct rte_memseg *ms, void *arg)
 {
        void **addr = arg;
 
+       if (msl->external)
+               return 0;
        if (*addr == NULL)
                *addr = ms->addr;
        else
@@ -568,7 +607,7 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
        rte_memseg_walk(find_lower_va_bound, &addr);
 
        /* keep distance to hugepages to minimize potential conflicts. */
-       addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
+       addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX5_UAR_OFFSET + MLX5_UAR_SIZE));
        /* anonymous mmap, no real memory consumption. */
        addr = mmap(addr, MLX5_UAR_SIZE,
                    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
@@ -644,15 +683,20 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev)
  *   Verbs device.
  * @param vf
  *   If nonzero, enable VF-specific features.
+ * @param[in] switch_info
+ *   Switch properties of Ethernet device.
  *
  * @return
  *   A valid Ethernet device object on success, NULL otherwise and rte_errno
- *   is set.
+ *   is set. The following error is defined:
+ *
+ *   EBUSY: device is not supposed to be spawned.
  */
 static struct rte_eth_dev *
 mlx5_dev_spawn(struct rte_device *dpdk_dev,
               struct ibv_device *ibv_dev,
-              int vf)
+              int vf,
+              const struct mlx5_switch_info *switch_info)
 {
        struct ibv_context *ctx;
        struct ibv_device_attr_ex attr;
@@ -661,6 +705,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        struct mlx5dv_context dv_attr = { .comp_mask = 0 };
        struct mlx5_dev_config config = {
                .vf = !!vf,
+               .mps = MLX5_ARG_UNSET,
                .tx_vec_en = 1,
                .rx_vec_en = 1,
                .mpw_hdr_dseg = 0,
@@ -683,7 +728,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        unsigned int tunnel_en = 0;
        unsigned int mpls_en = 0;
        unsigned int swp = 0;
-       unsigned int verb_priorities = 0;
        unsigned int mprq = 0;
        unsigned int mprq_min_stride_size_n = 0;
        unsigned int mprq_max_stride_size_n = 0;
@@ -694,7 +738,29 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 #endif
        struct ether_addr mac;
        char name[RTE_ETH_NAME_MAX_LEN];
+       int own_domain_id = 0;
+       unsigned int i;
+
+       /* Determine if this port representor is supposed to be spawned. */
+       if (switch_info->representor && dpdk_dev->devargs) {
+               struct rte_eth_devargs eth_da;
 
+               err = rte_eth_devargs_parse(dpdk_dev->devargs->args, &eth_da);
+               if (err) {
+                       rte_errno = -err;
+                       DRV_LOG(ERR, "failed to process device arguments: %s",
+                               strerror(rte_errno));
+                       return NULL;
+               }
+               for (i = 0; i < eth_da.nb_representor_ports; ++i)
+                       if (eth_da.representor_ports[i] ==
+                           (uint16_t)switch_info->port_name)
+                               break;
+               if (i == eth_da.nb_representor_ports) {
+                       rte_errno = EBUSY;
+                       return NULL;
+               }
+       }
        /* Prepare shared data between primary and secondary process. */
        mlx5_prepare_shared_data();
        errno = 0;
@@ -729,7 +795,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                DRV_LOG(DEBUG, "MPW isn't supported");
                mps = MLX5_MPW_DISABLED;
        }
-       config.mps = mps;
 #ifdef HAVE_IBV_MLX5_MOD_SWP
        if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
                swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
@@ -802,7 +867,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                DEBUG("ibv_query_device_ex() failed");
                goto error;
        }
-       rte_strlcpy(name, dpdk_dev->name, sizeof(name));
+       if (!switch_info->representor)
+               rte_strlcpy(name, dpdk_dev->name, sizeof(name));
+       else
+               snprintf(name, sizeof(name), "%s_representor_%u",
+                        dpdk_dev->name, switch_info->port_name);
+       DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                eth_dev = rte_eth_dev_attach_secondary(name);
                if (eth_dev == NULL) {
@@ -871,11 +941,58 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                goto error;
        }
        priv->ctx = ctx;
+       strncpy(priv->ibdev_name, priv->ctx->device->name,
+               sizeof(priv->ibdev_name));
        strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
                sizeof(priv->ibdev_path));
        priv->device_attr = attr;
        priv->pd = pd;
        priv->mtu = ETHER_MTU;
+#ifndef RTE_ARCH_64
+       /* Initialize UAR access locks for 32bit implementations. */
+       rte_spinlock_init(&priv->uar_lock_cq);
+       for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+               rte_spinlock_init(&priv->uar_lock[i]);
+#endif
+       /* Some internal functions rely on Netlink sockets, open them now. */
+       priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
+       priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
+       priv->nl_sn = 0;
+       priv->representor = !!switch_info->representor;
+       priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+       priv->representor_id =
+               switch_info->representor ? switch_info->port_name : -1;
+       /*
+        * Look for sibling devices in order to reuse their switch domain
+        * if any, otherwise allocate one.
+        */
+       i = mlx5_dev_to_port_id(dpdk_dev, NULL, 0);
+       if (i > 0) {
+               uint16_t port_id[i];
+
+               i = RTE_MIN(mlx5_dev_to_port_id(dpdk_dev, port_id, i), i);
+               while (i--) {
+                       const struct priv *opriv =
+                               rte_eth_devices[port_id[i]].data->dev_private;
+
+                       if (!opriv ||
+                           opriv->domain_id ==
+                           RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
+                               continue;
+                       priv->domain_id = opriv->domain_id;
+                       break;
+               }
+       }
+       if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
+               err = rte_eth_switch_domain_alloc(&priv->domain_id);
+               if (err) {
+                       err = rte_errno;
+                       DRV_LOG(ERR, "unable to allocate switch domain: %s",
+                               strerror(rte_errno));
+                       goto error;
+               }
+               own_domain_id = 1;
+       }
        err = mlx5_args(&config, dpdk_dev->devargs);
        if (err) {
                err = rte_errno;
@@ -921,13 +1038,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                       (1 << IBV_QPT_RAW_PACKET)));
        if (config.tso)
                config.tso_max_payload_sz = attr.tso_caps.max_tso;
-       if (config.mps && !mps) {
-               DRV_LOG(ERR,
-                       "multi-packet send not supported on this device"
-                       " (" MLX5_TXQ_MPW_EN ")");
-               err = ENOTSUP;
-               goto error;
-       }
+       /*
+        * MPW is disabled by default, while the Enhanced MPW is enabled
+        * by default.
+        */
+       if (config.mps == MLX5_ARG_UNSET)
+               config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
+                                                         MLX5_MPW_DISABLED;
+       else
+               config.mps = config.mps ? mps : MLX5_MPW_DISABLED;
        DRV_LOG(INFO, "%sMPS is %s",
                config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
                config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
@@ -959,6 +1078,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                err = ENOMEM;
                goto error;
        }
+       if (priv->representor)
+               eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
        eth_dev->data->dev_private = priv;
        priv->dev_data = eth_dev->data;
        eth_dev->data->mac_addrs = priv->mac;
@@ -1010,13 +1131,35 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        eth_dev->dev_ops = &mlx5_dev_ops;
        /* Register MAC address. */
        claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
-       priv->nl_socket = -1;
-       priv->nl_sn = 0;
-       if (vf && config.vf_nl_en) {
-               priv->nl_socket = mlx5_nl_init(RTMGRP_LINK);
-               if (priv->nl_socket < 0)
-                       priv->nl_socket = -1;
+       if (vf && config.vf_nl_en)
                mlx5_nl_mac_addr_sync(eth_dev);
+       priv->mnl_socket = mlx5_nl_flow_socket_create();
+       if (!priv->mnl_socket) {
+               err = -rte_errno;
+               DRV_LOG(WARNING,
+                       "flow rules relying on switch offloads will not be"
+                       " supported: cannot open libmnl socket: %s",
+                       strerror(rte_errno));
+       } else {
+               struct rte_flow_error error;
+               unsigned int ifindex = mlx5_ifindex(eth_dev);
+
+               if (!ifindex) {
+                       err = -rte_errno;
+                       error.message =
+                               "cannot retrieve network interface index";
+               } else {
+                       err = mlx5_nl_flow_init(priv->mnl_socket, ifindex,
+                                               &error);
+               }
+               if (err) {
+                       DRV_LOG(WARNING,
+                               "flow rules relying on switch offloads will"
+                               " not be supported: %s: %s",
+                               error.message, strerror(rte_errno));
+                       mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+                       priv->mnl_socket = NULL;
+               }
        }
        TAILQ_INIT(&priv->flows);
        TAILQ_INIT(&priv->ctrl_flows);
@@ -1040,24 +1183,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        mlx5_link_update(eth_dev, 0);
        /* Store device configuration on private structure. */
        priv->config = config;
-       /* Create drop queue. */
-       err = mlx5_flow_create_drop_queue(eth_dev);
-       if (err) {
-               DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
-                       eth_dev->data->port_id, strerror(rte_errno));
-               err = rte_errno;
-               goto error;
-       }
        /* Supported Verbs flow priority number detection. */
-       if (verb_priorities == 0)
-               verb_priorities = mlx5_get_max_verbs_prio(eth_dev);
-       if (verb_priorities < MLX5_VERBS_FLOW_PRIO_8) {
-               DRV_LOG(ERR, "port %u wrong Verbs flow priorities: %u",
-                       eth_dev->data->port_id, verb_priorities);
-               err = ENOTSUP;
+       err = mlx5_flow_discover_priorities(eth_dev);
+       if (err < 0)
                goto error;
-       }
-       priv->config.max_verbs_prio = verb_priorities;
+       priv->config.flow_prio = err;
+       mlx5_flow_init_driver_ops(eth_dev);
        /*
         * Once the device is added to the list of memory event
         * callback, its global MR cache table cannot be expanded
@@ -1078,8 +1209,17 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        return eth_dev;
 error:
-       if (priv)
+       if (priv) {
+               if (priv->nl_socket_route >= 0)
+                       close(priv->nl_socket_route);
+               if (priv->nl_socket_rdma >= 0)
+                       close(priv->nl_socket_rdma);
+               if (priv->mnl_socket)
+                       mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+               if (own_domain_id)
+                       claim_zero(rte_eth_switch_domain_free(priv->domain_id));
                rte_free(priv);
+       }
        if (pd)
                claim_zero(mlx5_glue->dealloc_pd(pd));
        if (eth_dev)
@@ -1091,10 +1231,56 @@ error:
        return NULL;
 }
 
+/** Data associated with devices to spawn. */
+struct mlx5_dev_spawn_data {
+       unsigned int ifindex; /**< Network interface index. */
+       struct mlx5_switch_info info; /**< Switch information. */
+       struct ibv_device *ibv_dev; /**< Associated IB device. */
+       struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
+};
+
+/**
+ * Comparison callback to sort device data.
+ *
+ * This is meant to be used with qsort().
+ *
+ * @param[in] a
+ *   Pointer to first data object.
+ * @param[in] b
+ *   Pointer to second data object.
+ *
+ * @return
+ *   0 if both objects are equal, less than 0 if the first argument is less
+ *   than the second, greater than 0 otherwise.
+ */
+static int
+mlx5_dev_spawn_data_cmp(const void *a, const void *b)
+{
+       const struct mlx5_switch_info *si_a =
+               &((const struct mlx5_dev_spawn_data *)a)->info;
+       const struct mlx5_switch_info *si_b =
+               &((const struct mlx5_dev_spawn_data *)b)->info;
+       int ret;
+
+       /* Master device first. */
+       ret = si_b->master - si_a->master;
+       if (ret)
+               return ret;
+       /* Then representor devices. */
+       ret = si_b->representor - si_a->representor;
+       if (ret)
+               return ret;
+       /* Unidentified devices come last in no specific order. */
+       if (!si_a->representor)
+               return 0;
+       /* Order representors by name. */
+       return si_a->port_name - si_b->port_name;
+}
+
 /**
  * DPDK callback to register a PCI device.
  *
- * This function spawns an Ethernet device out of a given PCI device.
+ * This function spawns Ethernet devices out of a given PCI device.
  *
  * @param[in] pci_drv
  *   PCI driver structure (mlx5_driver).
@@ -1109,7 +1295,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
               struct rte_pci_device *pci_dev)
 {
        struct ibv_device **ibv_list;
-       struct rte_eth_dev *eth_dev = NULL;
+       unsigned int n = 0;
        int vf;
        int ret;
 
@@ -1121,6 +1307,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
                return -rte_errno;
        }
+
+       struct ibv_device *ibv_match[ret + 1];
+
        while (ret-- > 0) {
                struct rte_pci_addr pci_addr;
 
@@ -1132,10 +1321,79 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                    pci_dev->addr.devid != pci_addr.devid ||
                    pci_dev->addr.function != pci_addr.function)
                        continue;
-               DRV_LOG(INFO, "PCI information matches, using device \"%s\"",
+               DRV_LOG(INFO, "PCI information matches for device \"%s\"",
                        ibv_list[ret]->name);
-               break;
+               ibv_match[n++] = ibv_list[ret];
+       }
+       ibv_match[n] = NULL;
+
+       struct mlx5_dev_spawn_data list[n];
+       int nl_route = n ? mlx5_nl_init(NETLINK_ROUTE) : -1;
+       int nl_rdma = n ? mlx5_nl_init(NETLINK_RDMA) : -1;
+       unsigned int i;
+       unsigned int u;
+
+       /*
+        * The existence of several matching entries (n > 1) means port
+        * representors have been instantiated. No existing Verbs call nor
+        * /sys entries can tell them apart, this can only be done through
+        * Netlink calls assuming kernel drivers are recent enough to
+        * support them.
+        *
+        * In the event of identification failure through Netlink, try again
+        * through sysfs, then either:
+        *
+        * 1. No device matches (n == 0), complain and bail out.
+        * 2. A single IB device matches (n == 1) and is not a representor,
+        *    assume no switch support.
+        * 3. Otherwise no safe assumptions can be made; complain louder and
+        *    bail out.
+        */
+       for (i = 0; i != n; ++i) {
+               list[i].ibv_dev = ibv_match[i];
+               list[i].eth_dev = NULL;
+               if (nl_rdma < 0)
+                       list[i].ifindex = 0;
+               else
+                       list[i].ifindex = mlx5_nl_ifindex
+                               (nl_rdma, list[i].ibv_dev->name);
+               if (nl_route < 0 ||
+                   !list[i].ifindex ||
+                   mlx5_nl_switch_info(nl_route, list[i].ifindex,
+                                       &list[i].info) ||
+                   ((!list[i].info.representor && !list[i].info.master) &&
+                    mlx5_sysfs_switch_info(list[i].ifindex, &list[i].info))) {
+                       list[i].ifindex = 0;
+                       memset(&list[i].info, 0, sizeof(list[i].info));
+                       continue;
+               }
+       }
+       if (nl_rdma >= 0)
+               close(nl_rdma);
+       if (nl_route >= 0)
+               close(nl_route);
+       /* Count unidentified devices. */
+       for (u = 0, i = 0; i != n; ++i)
+               if (!list[i].info.master && !list[i].info.representor)
+                       ++u;
+       if (u) {
+               if (n == 1 && u == 1) {
+                       /* Case #2. */
+                       DRV_LOG(INFO, "no switch support detected");
+               } else {
+                       /* Case #3. */
+                       DRV_LOG(ERR,
+                               "unable to tell which of the matching devices"
+                               " is the master (lack of kernel support?)");
+                       n = 0;
+               }
        }
+       /*
+        * Sort list to probe devices in natural order for users' convenience
+        * (i.e. master first, then representors from lowest to highest ID).
+        */
+       if (n)
+               qsort(list, n, sizeof(*list), mlx5_dev_spawn_data_cmp);
        switch (pci_dev->id.device_id) {
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
@@ -1146,10 +1404,25 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        default:
                vf = 0;
        }
-       if (ret >= 0)
-               eth_dev = mlx5_dev_spawn(&pci_dev->device, ibv_list[ret], vf);
+       for (i = 0; i != n; ++i) {
+               uint32_t restore;
+
+               list[i].eth_dev = mlx5_dev_spawn
+                       (&pci_dev->device, list[i].ibv_dev, vf, &list[i].info);
+               if (!list[i].eth_dev) {
+                       if (rte_errno != EBUSY)
+                               break;
+                       /* Device is disabled, ignore it. */
+                       continue;
+               }
+               restore = list[i].eth_dev->data->dev_flags;
+               rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
+               /* Restore non-PCI flags cleared by the above call. */
+               list[i].eth_dev->data->dev_flags |= restore;
+               rte_eth_dev_probing_finish(list[i].eth_dev);
+       }
        mlx5_glue->free_device_list(ibv_list);
-       if (!ret) {
+       if (!n) {
                DRV_LOG(WARNING,
                        "no Verbs device matches PCI device " PCI_PRI_FMT ","
                        " are kernel drivers loaded?",
@@ -1157,7 +1430,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                        pci_dev->addr.devid, pci_dev->addr.function);
                rte_errno = ENOENT;
                ret = -rte_errno;
-       } else if (!eth_dev) {
+       } else if (i != n) {
                DRV_LOG(ERR,
                        "probe of PCI device " PCI_PRI_FMT " aborted after"
                        " encountering an error: %s",
@@ -1165,9 +1438,18 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                        pci_dev->addr.devid, pci_dev->addr.function,
                        strerror(rte_errno));
                ret = -rte_errno;
+               /* Roll back. */
+               while (i--) {
+                       if (!list[i].eth_dev)
+                               continue;
+                       mlx5_dev_close(list[i].eth_dev);
+                       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+                               rte_free(list[i].eth_dev->data->dev_private);
+                       claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
+               }
+               /* Restore original error. */
+               rte_errno = -ret;
        } else {
-               rte_eth_copy_pci_info(eth_dev, pci_dev);
-               rte_eth_dev_probing_finish(eth_dev);
                ret = 0;
        }
        return ret;
@@ -1210,6 +1492,10 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
        },
+       {
+               RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+                              PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
+       },
        {
                .vendor_id = 0
        }