net/mlx5: add ConnectX6-DX device ID
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index a3eacdb..102c6ab 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -32,7 +32,6 @@
 #include <rte_bus_pci.h>
 #include <rte_common.h>
 #include <rte_config.h>
-#include <rte_eal_memconfig.h>
 #include <rte_kvargs.h>
 #include <rte_rwlock.h>
 #include <rte_spinlock.h>
@@ -169,6 +168,7 @@ struct mlx5_dev_spawn_data {
        uint32_t ifindex; /**< Network interface index. */
        uint32_t max_port; /**< IB device maximal port index. */
        uint32_t ibv_port; /**< IB device physical port index. */
+       int pf_bond; /**< Bonding device PF index, < 0 if no bonding. */
        struct mlx5_switch_info info; /**< Switch information. */
        struct ibv_device *ibv_dev; /**< Associated IB device. */
        struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
@@ -178,6 +178,124 @@ struct mlx5_dev_spawn_data {
 static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
 static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
 
+#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
+#define MLX5_ID_GENERATION_ARRAY_FACTOR 16
+
+/**
+ * Allocate ID pool structure.
+ *
+ * @return
+ *   Pointer to pool object, NULL on failure.
+ */
+struct mlx5_flow_id_pool *
+mlx5_flow_id_pool_alloc(void)
+{
+       struct mlx5_flow_id_pool *pool;
+       void *mem;
+
+       pool = rte_zmalloc("id pool allocation", sizeof(*pool),
+                          RTE_CACHE_LINE_SIZE);
+       if (!pool) {
+               DRV_LOG(ERR, "can't allocate id pool");
+               rte_errno = ENOMEM;
+               return NULL;
+       }
+       mem = rte_zmalloc("", MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
+                         RTE_CACHE_LINE_SIZE);
+       if (!mem) {
+               DRV_LOG(ERR, "can't allocate mem for id pool");
+               rte_errno = ENOMEM;
+               goto error;
+       }
+       pool->free_arr = mem;
+       pool->curr = pool->free_arr;
+       pool->last = pool->free_arr + MLX5_FLOW_MIN_ID_POOL_SIZE;
+       pool->base_index = 0;
+       return pool;
+error:
+       rte_free(pool);
+       return NULL;
+}
+
+/**
+ * Release ID pool structure.
+ *
+ * @param[in] pool
+ *   Pointer to flow id pool object to free.
+ */
+void
+mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool)
+{
+       rte_free(pool->free_arr);
+       rte_free(pool);
+}
+
+/**
+ * Generate ID.
+ *
+ * @param[in] pool
+ *   Pointer to flow id pool.
+ * @param[out] id
+ *   The generated ID.
+ *
+ * @return
+ *   0 on success, error value otherwise.
+ */
+uint32_t
+mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id)
+{
+       if (pool->curr == pool->free_arr) {
+               if (pool->base_index == UINT32_MAX) {
+                       rte_errno = ENOMEM;
+                       DRV_LOG(ERR, "no free id");
+                       return -rte_errno;
+               }
+               *id = ++pool->base_index;
+               return 0;
+       }
+       *id = *(--pool->curr);
+       return 0;
+}
+
+/**
+ * Release ID.
+ *
+ * @param[in] pool
+ *   Pointer to flow id pool.
+ * @param[in] id
+ *   The ID to release.
+ *
+ * @return
+ *   0 on success, error value otherwise.
+ */
+uint32_t
+mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id)
+{
+       uint32_t size;
+       uint32_t size2;
+       void *mem;
+
+       if (pool->curr == pool->last) {
+               size = pool->curr - pool->free_arr;
+               size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
+               assert(size2 > size);
+               mem = rte_malloc("", size2 * sizeof(uint32_t), 0);
+               if (!mem) {
+                       DRV_LOG(ERR, "can't allocate mem for id pool");
+                       rte_errno = ENOMEM;
+                       return -rte_errno;
+               }
+               memcpy(mem, pool->free_arr, size * sizeof(uint32_t));
+               rte_free(pool->free_arr);
+               pool->free_arr = mem;
+               pool->curr = pool->free_arr + size;
+               pool->last = pool->free_arr + size2;
+       }
+       *pool->curr = id;
+       pool->curr++;
+       return 0;
+}
+
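A minimal lifecycle sketch for the new ID pool (hypothetical caller, error paths abbreviated): released IDs are recycled from the free array before fresh ones are minted from base_index, and the array grows by MLX5_ID_GENERATION_ARRAY_FACTOR when it fills on release.

    struct mlx5_flow_id_pool *pool;
    uint32_t id;

    pool = mlx5_flow_id_pool_alloc();
    if (pool == NULL)
            return -rte_errno;              /* ENOMEM set by the allocator */
    if (mlx5_flow_id_get(pool, &id) == 0) {
            /* First ID minted from base_index is 1; use it as a handle. */
            mlx5_flow_id_release(pool, id); /* push back for recycling */
    }
    mlx5_flow_id_pool_release(pool);        /* frees free_arr and pool */
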
 /**
  * Initialize the counters management structure.
  *
@@ -324,8 +442,11 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
        struct mlx5_ibv_shared *sh;
        int err = 0;
        uint32_t i;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       struct mlx5_devx_tis_attr tis_attr = { 0 };
+#endif
 
        assert(spawn);
        /* Secondary process should not create the shared context. */
        assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
        pthread_mutex_lock(&mlx5_ibv_list_mutex);
@@ -373,15 +494,16 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
                sizeof(sh->ibdev_name));
        strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path,
                sizeof(sh->ibdev_path));
-       sh->pci_dev = spawn->pci_dev;
        pthread_mutex_init(&sh->intr_mutex, NULL);
        /*
         * Setting port_id to max unallowed value means
         * there is no interrupt subhandler installed for
         * the given port index i.
         */
-       for (i = 0; i < sh->max_port; i++)
+       for (i = 0; i < sh->max_port; i++) {
                sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
+               sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
+       }
        sh->pd = mlx5_glue->alloc_pd(sh->ctx);
        if (sh->pd == NULL) {
                DRV_LOG(ERR, "PD allocation failure");
@@ -389,9 +511,30 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
                goto error;
        }
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-       err = mlx5_get_pdn(sh->pd, &sh->pdn);
-       if (err) {
-               DRV_LOG(ERR, "Fail to extract pdn from PD");
+       if (sh->devx) {
+               err = mlx5_get_pdn(sh->pd, &sh->pdn);
+               if (err) {
+                       DRV_LOG(ERR, "Fail to extract pdn from PD");
+                       goto error;
+               }
+               sh->td = mlx5_devx_cmd_create_td(sh->ctx);
+               if (!sh->td) {
+                       DRV_LOG(ERR, "TD allocation failure");
+                       err = ENOMEM;
+                       goto error;
+               }
+               tis_attr.transport_domain = sh->td->id;
+               sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
+               if (!sh->tis) {
+                       DRV_LOG(ERR, "TIS allocation failure");
+                       err = ENOMEM;
+                       goto error;
+               }
+       }
+       sh->flow_id_pool = mlx5_flow_id_pool_alloc();
+       if (!sh->flow_id_pool) {
+               DRV_LOG(ERR, "can't create flow id pool");
+               err = ENOMEM;
                goto error;
        }
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
@@ -406,12 +549,18 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
         */
        err = mlx5_mr_btree_init(&sh->mr.cache,
                                 MLX5_MR_BTREE_CACHE_N * 2,
-                                sh->pci_dev->device.numa_node);
+                                spawn->pci_dev->device.numa_node);
        if (err) {
                err = rte_errno;
                goto error;
        }
        mlx5_flow_counters_mng_init(sh);
+       /* Add device to memory callback list. */
+       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+       LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
+                        sh, mem_event_cb);
+       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+       /* Add context to the global device list. */
        LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
 exit:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
@@ -419,10 +568,16 @@ exit:
 error:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
        assert(sh);
+       if (sh->tis)
+               claim_zero(mlx5_devx_cmd_destroy(sh->tis));
+       if (sh->td)
+               claim_zero(mlx5_devx_cmd_destroy(sh->td));
        if (sh->pd)
                claim_zero(mlx5_glue->dealloc_pd(sh->pd));
        if (sh->ctx)
                claim_zero(mlx5_glue->close_device(sh->ctx));
+       if (sh->flow_id_pool)
+               mlx5_flow_id_pool_release(sh->flow_id_pool);
        rte_free(sh);
        assert(err > 0);
        rte_errno = err;
@@ -461,6 +616,11 @@ mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
                goto exit;
        /* Release created Memory Regions. */
        mlx5_mr_release(sh);
+       /* Remove from memory callback device list. */
+       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+       LIST_REMOVE(sh, mem_event_cb);
+       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+       /* Remove context from the global device list. */
        LIST_REMOVE(sh, next);
        /*
         *  Ensure there is no async event handler installed.
@@ -471,11 +631,26 @@ mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
        if (sh->intr_cnt)
                mlx5_intr_callback_unregister
                        (&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
+#ifdef HAVE_MLX5_DEVX_ASYNC_SUPPORT
+       if (sh->devx_intr_cnt) {
+               if (sh->intr_handle_devx.fd)
+                       rte_intr_callback_unregister(&sh->intr_handle_devx,
+                                         mlx5_dev_interrupt_handler_devx, sh);
+               if (sh->devx_comp)
+                       mlx5dv_devx_destroy_cmd_comp(sh->devx_comp);
+       }
+#endif
        pthread_mutex_destroy(&sh->intr_mutex);
        if (sh->pd)
                claim_zero(mlx5_glue->dealloc_pd(sh->pd));
+       if (sh->tis)
+               claim_zero(mlx5_devx_cmd_destroy(sh->tis));
+       if (sh->td)
+               claim_zero(mlx5_devx_cmd_destroy(sh->td));
        if (sh->ctx)
                claim_zero(mlx5_glue->close_device(sh->ctx));
+       if (sh->flow_id_pool)
+               mlx5_flow_id_pool_release(sh->flow_id_pool);
        rte_free(sh);
 exit:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
@@ -538,6 +713,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
                sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
        }
 #endif
+       sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
        sh->dv_refcnt++;
        priv->dr_shared = 1;
        return 0;
@@ -560,6 +736,10 @@ error:
                mlx5_glue->destroy_flow_action(sh->esw_drop_action);
                sh->esw_drop_action = NULL;
        }
+       if (sh->pop_vlan_action) {
+               mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
+               sh->pop_vlan_action = NULL;
+       }
        return err;
 #else
        (void)priv;
@@ -605,6 +785,10 @@ mlx5_free_shared_dr(struct mlx5_priv *priv)
                sh->esw_drop_action = NULL;
        }
 #endif
+       if (sh->pop_vlan_action) {
+               mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
+               sh->pop_vlan_action = NULL;
+       }
        pthread_mutex_destroy(&sh->dv_mutex);
 #else
        (void)priv;
@@ -635,7 +819,7 @@ mlx5_init_shared_data(void)
                                                 SOCKET_ID_ANY, 0);
                        if (mz == NULL) {
                                DRV_LOG(ERR,
-                                       "Cannot allocate mlx5 shared data\n");
+                                       "Cannot allocate mlx5 shared data");
                                ret = -rte_errno;
                                goto error;
                        }
@@ -647,7 +831,7 @@ mlx5_init_shared_data(void)
                        mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
                        if (mz == NULL) {
                                DRV_LOG(ERR,
-                                       "Cannot attach mlx5 shared data\n");
+                                       "Cannot attach mlx5 shared data");
                                ret = -rte_errno;
                                goto error;
                        }
@@ -733,6 +917,31 @@ mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
        rte_free(ptr);
 }
 
+/**
+ * DPDK callback to add UDP tunnel port.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device structure.
+ * @param[in] udp_tunnel
+ *   Pointer to UDP tunnel structure.
+ *
+ * @return
+ *   0 on valid UDP ports and tunnels, -ENOTSUP otherwise.
+ */
+int
+mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
+                        struct rte_eth_udp_tunnel *udp_tunnel)
+{
+       assert(udp_tunnel != NULL);
+       if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+           udp_tunnel->udp_port == 4789)
+               return 0;
+       if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+           udp_tunnel->udp_port == 4790)
+               return 0;
+       return -ENOTSUP;
+}
+
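From the application side, this callback is reached through the generic ethdev API; a sketch (assuming the DPDK 19.11-era rte_eth_dev_udp_tunnel_port_add() signature) of a call that succeeds, since only the IANA-assigned tunnel ports the device already parses are accepted:

    struct rte_eth_udp_tunnel tunnel = {
            .udp_port = 4789,                   /* IANA-assigned VXLAN port */
            .prot_type = RTE_TUNNEL_TYPE_VXLAN,
    };

    /* Returns 0; any other port/type combination yields -ENOTSUP. */
    int ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
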
 /**
  * Initialize process private data structure.
  *
@@ -801,6 +1010,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                ((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : ""));
        /* In case mlx5_dev_stop() has not been called. */
        mlx5_dev_interrupt_handler_uninstall(dev);
+       mlx5_dev_interrupt_handler_devx_uninstall(dev);
        mlx5_traffic_disable(dev);
        mlx5_flow_flush(dev, NULL);
        /* Prevent crashes when queues are still in use. */
@@ -827,11 +1037,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        }
        mlx5_proc_priv_uninit(dev);
        mlx5_mprq_free_mp(dev);
-       /* Remove from memory callback device list. */
-       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-       assert(priv->sh);
-       LIST_REMOVE(priv->sh, mem_event_cb);
-       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        mlx5_free_shared_dr(priv);
        if (priv->rss_conf.rss_key != NULL)
                rte_free(priv->rss_conf.rss_key);
@@ -871,7 +1076,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        if (ret)
                DRV_LOG(WARNING, "port %u some Rx queues still remain",
                        dev->data->port_id);
-       ret = mlx5_txq_ibv_verify(dev);
+       ret = mlx5_txq_obj_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
                        dev->data->port_id);
@@ -887,7 +1092,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                unsigned int c = 0;
                uint16_t port_id;
 
-               RTE_ETH_FOREACH_DEV_OF(port_id, dev->device) {
+               MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
                        struct mlx5_priv *opriv =
                                rte_eth_devices[port_id].data->dev_private;
 
@@ -896,6 +1101,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                            &rte_eth_devices[port_id] == dev)
                                continue;
                        ++c;
+                       break;
                }
                if (!c)
                        claim_zero(rte_eth_switch_domain_free(priv->domain_id));
@@ -933,7 +1139,9 @@ const struct eth_dev_ops mlx5_dev_ops = {
        .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
        .rx_queue_setup = mlx5_rx_queue_setup,
+       .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
        .tx_queue_setup = mlx5_tx_queue_setup,
+       .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
        .rx_queue_release = mlx5_rx_queue_release,
        .tx_queue_release = mlx5_tx_queue_release,
        .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
@@ -956,6 +1164,10 @@ const struct eth_dev_ops mlx5_dev_ops = {
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
+       .udp_tunnel_port_add  = mlx5_udp_tunnel_port_add,
+       .get_module_info = mlx5_get_module_info,
+       .get_module_eeprom = mlx5_get_module_eeprom,
+       .hairpin_cap_get = mlx5_hairpin_cap_get,
 };
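The new hairpin ops are driven by the generic ethdev hairpin API; a sketch of binding Rx queue 1 to peer Tx queue 1 on the same port (illustrative indices, assuming the rte_eth_hairpin_conf layout of the 19.11 API):

    struct rte_eth_hairpin_conf hairpin_conf = {
            .peer_count = 1,
            .peers[0] = { .port = port_id, .queue = 1 },
    };

    /* Rx side; the Tx queue is bound symmetrically with
     * rte_eth_tx_hairpin_queue_setup(). */
    ret = rte_eth_rx_hairpin_queue_setup(port_id, 1, nb_desc,
                                         &hairpin_conf);
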
 
 /* Available operations from secondary process. */
@@ -969,6 +1181,8 @@ static const struct eth_dev_ops mlx5_dev_sec_ops = {
        .dev_infos_get = mlx5_dev_infos_get,
        .rx_descriptor_status = mlx5_rx_descriptor_status,
        .tx_descriptor_status = mlx5_tx_descriptor_status,
+       .get_module_info = mlx5_get_module_info,
+       .get_module_eeprom = mlx5_get_module_eeprom,
 };
 
 /* Available operations in flow isolated mode. */
@@ -994,7 +1208,9 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
        .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
        .rx_queue_setup = mlx5_rx_queue_setup,
+       .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
        .tx_queue_setup = mlx5_tx_queue_setup,
+       .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
        .rx_queue_release = mlx5_rx_queue_release,
        .tx_queue_release = mlx5_tx_queue_release,
        .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
@@ -1012,6 +1228,9 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
+       .get_module_info = mlx5_get_module_info,
+       .get_module_eeprom = mlx5_get_module_eeprom,
+       .hairpin_cap_get = mlx5_hairpin_cap_get,
 };
 
 /**
@@ -1470,6 +1689,53 @@ mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset)
        return ret;
 }
 
+/**
+ * Check sibling device configurations.
+ *
+ * Sibling devices sharing the Infiniband device context
+ * should have compatible configurations. This regards
+ * representors and bonding slaves.
+ *
+ * @param priv
+ *   Private device descriptor.
+ * @param config
+ *   Configuration of the device to be created.
+ *
+ * @return
+ *   0 on success, EINVAL otherwise
+ */
+static int
+mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
+                             struct mlx5_dev_config *config)
+{
+       struct mlx5_ibv_shared *sh = priv->sh;
+       struct mlx5_dev_config *sh_conf = NULL;
+       uint16_t port_id;
+
+       assert(sh);
+       /* Nothing to compare for the single/first device. */
+       if (sh->refcnt == 1)
+               return 0;
+       /* Find the device with shared context. */
+       MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
+               struct mlx5_priv *opriv =
+                       rte_eth_devices[port_id].data->dev_private;
+
+               if (opriv && opriv != priv && opriv->sh == sh) {
+                       sh_conf = &opriv->config;
+                       break;
+               }
+       }
+       if (!sh_conf)
+               return 0;
+       if (sh_conf->dv_flow_en ^ config->dv_flow_en) {
+               DRV_LOG(ERR, "\"dv_flow_en\" configuration mismatch"
+                            " for shared %s context", sh->ibdev_name);
+               rte_errno = EINVAL;
+               return rte_errno;
+       }
+       return 0;
+}
+
 /**
  * Spawn an Ethernet device from Verbs information.
  *
@@ -1516,6 +1782,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        int own_domain_id = 0;
        uint16_t port_id;
        unsigned int i;
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT
+       struct mlx5dv_devx_port devx_port;
+#endif
 
        /* Determine if this port representor is supposed to be spawned. */
        if (switch_info->representor && dpdk_dev->devargs) {
@@ -1538,11 +1807,23 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                }
        }
        /* Build device name. */
-       if (!switch_info->representor)
-               strlcpy(name, dpdk_dev->name, sizeof(name));
-       else
-               snprintf(name, sizeof(name), "%s_representor_%u",
-                        dpdk_dev->name, switch_info->port_name);
+       if (spawn->pf_bond < 0) {
+               /* Single device. */
+               if (!switch_info->representor)
+                       strlcpy(name, dpdk_dev->name, sizeof(name));
+               else
+                       snprintf(name, sizeof(name), "%s_representor_%u",
+                                dpdk_dev->name, switch_info->port_name);
+       } else {
+               /* Bonding device. */
+               if (!switch_info->representor)
+                       snprintf(name, sizeof(name), "%s_%s",
+                                dpdk_dev->name, spawn->ibv_dev->name);
+               else
+                       snprintf(name, sizeof(name), "%s_%s_representor_%u",
+                                dpdk_dev->name, spawn->ibv_dev->name,
+                                switch_info->port_name);
+       }
        /* check if the device is already spawned */
        if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
                rte_errno = EEXIST;
@@ -1709,6 +1990,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        }
        priv->sh = sh;
        priv->ibv_port = spawn->ibv_port;
+       priv->pci_dev = spawn->pci_dev;
        priv->mtu = RTE_ETHER_MTU;
 #ifndef RTE_ARCH_64
        /* Initialize UAR access locks for 32bit implementations. */
@@ -1723,8 +2005,57 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        priv->representor = !!switch_info->representor;
        priv->master = !!switch_info->master;
        priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+       priv->vport_meta_tag = 0;
+       priv->vport_meta_mask = 0;
+       priv->pf_bond = spawn->pf_bond;
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT
        /*
-        * Currently we support single E-Switch per PF configurations
+        * The DevX port query API is implemented. E-Switch may use
+        * either vport or reg_c[0] metadata register to match on
+        * vport index. The engaged part of metadata register is
+        * defined by mask.
+        */
+       devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT |
+                             MLX5DV_DEVX_PORT_MATCH_REG_C_0;
+       err = mlx5_glue->devx_port_query(sh->ctx, spawn->ibv_port, &devx_port);
+       if (err) {
+               DRV_LOG(WARNING, "can't query devx port %d on device %s",
+                       spawn->ibv_port, spawn->ibv_dev->name);
+               devx_port.comp_mask = 0;
+       }
+       if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {
+               priv->vport_meta_tag = devx_port.reg_c_0.value;
+               priv->vport_meta_mask = devx_port.reg_c_0.mask;
+               if (!priv->vport_meta_mask) {
+                       DRV_LOG(ERR, "vport zero mask for port %d"
+                                    " on bonding device %s",
+                                    spawn->ibv_port, spawn->ibv_dev->name);
+                       err = ENOTSUP;
+                       goto error;
+               }
+               if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
+                       DRV_LOG(ERR, "invalid vport tag for port %d"
+                                    " on bonding device %s",
+                                    spawn->ibv_port, spawn->ibv_dev->name);
+                       err = ENOTSUP;
+                       goto error;
+               }
+       } else if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
+               priv->vport_id = devx_port.vport_num;
+       } else if (spawn->pf_bond >= 0) {
+               DRV_LOG(ERR, "can't deduce vport index for port %d"
+                            " on bonding device %s",
+                            spawn->ibv_port, spawn->ibv_dev->name);
+               err = ENOTSUP;
+               goto error;
+       } else {
+               /* Deduce vport index in a compatible way. */
+               priv->vport_id = switch_info->representor ?
+                                switch_info->port_name + 1 : -1;
+       }
+#else
+       /*
+        * Kernel/rdma_core supports single E-Switch per PF configurations
         * only and vport_id field contains the vport index for
         * associated VF, which is deduced from representor port name.
         * For example, let's have the IB device port 10, it has
@@ -1736,18 +2067,20 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
         */
        priv->vport_id = switch_info->representor ?
                         switch_info->port_name + 1 : -1;
-       /* representor_id field keeps the unmodified port/VF index. */
+#endif
+       /* representor_id field keeps the unmodified VF index. */
        priv->representor_id = switch_info->representor ?
                               switch_info->port_name : -1;
        /*
         * Look for sibling devices in order to reuse their switch domain
         * if any, otherwise allocate one.
         */
-       RTE_ETH_FOREACH_DEV_OF(port_id, dpdk_dev) {
+       MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
                const struct mlx5_priv *opriv =
                        rte_eth_devices[port_id].data->dev_private;
 
                if (!opriv ||
+                   opriv->sh != priv->sh ||
                        opriv->domain_id ==
                        RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
                        continue;
@@ -1771,6 +2104,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                        strerror(rte_errno));
                goto error;
        }
+       err = mlx5_dev_check_sibling_config(priv, &config);
+       if (err)
+               goto error;
        config.hw_csum = !!(sh->device_attr.device_cap_flags_ex &
                            IBV_DEVICE_RAW_IP_CSUM);
        DRV_LOG(DEBUG, "checksum offloading is %ssupported",
@@ -1855,9 +2191,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                priv->counter_fallback = 1;
 #endif
                if (priv->counter_fallback)
-                       DRV_LOG(INFO, "Use fall-back DV counter management\n");
+                       DRV_LOG(INFO, "Use fall-back DV counter management");
                /* Check for LRO support. */
-               if (config.dest_tir && config.hca_attr.lro_cap) {
+               if (config.dest_tir && config.hca_attr.lro_cap &&
+                   config.dv_flow_en) {
                        /* TBD check tunnel lro caps. */
                        config.lro.supported = config.hca_attr.lro_cap;
                        DRV_LOG(DEBUG, "Device supports LRO");
@@ -2004,11 +2341,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                goto error;
        }
        priv->config.flow_prio = err;
-       /* Add device to memory callback list. */
-       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-       LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
-                        sh, mem_event_cb);
-       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        return eth_dev;
 error:
        if (priv) {
@@ -2076,6 +2408,105 @@ mlx5_dev_spawn_data_cmp(const void *a, const void *b)
        return si_a->port_name - si_b->port_name;
 }
 
+/**
+ * Match PCI information for possible slaves of bonding device.
+ *
+ * @param[in] ibv_dev
+ *   Pointer to Infiniband device structure.
+ * @param[in] pci_dev
+ *   Pointer to PCI device structure to match PCI address.
+ * @param[in] nl_rdma
+ *   Netlink RDMA group socket handle.
+ *
+ * @return
+ *   negative value if no bonding device is found, otherwise
+ *   non-negative index of the slave PF in bonding.
+ */
+static int
+mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
+                          const struct rte_pci_device *pci_dev,
+                          int nl_rdma)
+{
+       char ifname[IF_NAMESIZE + 1];
+       unsigned int ifindex;
+       unsigned int np, i;
+       FILE *file = NULL;
+       int pf = -1;
+
+       /*
+        * Try to get the master device name. If something goes
+        * wrong, assume there is no kernel support and no
+        * bonding devices.
+        */
+       if (nl_rdma < 0)
+               return -1;
+       if (!strstr(ibv_dev->name, "bond"))
+               return -1;
+       np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
+       if (!np)
+               return -1;
+       /*
+        * The master device might not be on the predefined
+        * port (port index 1 is not guaranteed), so we have
+        * to scan all Infiniband device ports and find the
+        * master.
+        */
+       for (i = 1; i <= np; ++i) {
+               /* Check whether Infiniband port is populated. */
+               ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
+               if (!ifindex)
+                       continue;
+               if (!if_indextoname(ifindex, ifname))
+                       continue;
+               /* Try to read bonding slave names from sysfs. */
+               MKSTR(slaves,
+                     "/sys/class/net/%s/master/bonding/slaves", ifname);
+               file = fopen(slaves, "r");
+               if (file)
+                       break;
+       }
+       if (!file)
+               return -1;
+       /* Use safe format to check maximal buffer length. */
+       assert(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
+       while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
+               char tmp_str[IF_NAMESIZE + 32];
+               struct rte_pci_addr pci_addr;
+               struct mlx5_switch_info info;
+
+               /* Process slave interface names in the loop. */
+               snprintf(tmp_str, sizeof(tmp_str),
+                        "/sys/class/net/%s", ifname);
+               if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
+                       DRV_LOG(WARNING, "can not get PCI address"
+                                        " for netdev \"%s\"", ifname);
+                       continue;
+               }
+               if (pci_dev->addr.domain != pci_addr.domain ||
+                   pci_dev->addr.bus != pci_addr.bus ||
+                   pci_dev->addr.devid != pci_addr.devid ||
+                   pci_dev->addr.function != pci_addr.function)
+                       continue;
+               /* Slave interface PCI address match found. */
+               fclose(file);
+               snprintf(tmp_str, sizeof(tmp_str),
+                        "/sys/class/net/%s/phys_port_name", ifname);
+               file = fopen(tmp_str, "rb");
+               if (!file)
+                       break;
+               info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET;
+               if (fscanf(file, "%32s", tmp_str) == 1)
+                       mlx5_translate_port_name(tmp_str, &info);
+               if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY ||
+                   info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
+                       pf = info.port_name;
+               break;
+       }
+       if (file)
+               fclose(file);
+       return pf;
+}
+
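The phys_port_name value read above is what mlx5_translate_port_name() classifies; typical kernel encodings (inferred from the name-type constants used here, so treat as illustrative) look like:

    "p0"     -> MLX5_PHYS_PORT_NAME_TYPE_UPLINK  (uplink, port_name = 0)
    "pf0vf2" -> MLX5_PHYS_PORT_NAME_TYPE_PFVF    (PF 0, VF 2 representor)
    "2"      -> MLX5_PHYS_PORT_NAME_TYPE_LEGACY  (legacy numeric naming)
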
 /**
  * DPDK callback to register a PCI device.
  *
@@ -2112,6 +2543,13 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
         * Actually this is the number of iterations to spawn.
         */
        unsigned int ns = 0;
+       /*
+        * Bonding device
+        *   < 0 - no bonding device (single one)
+        *  >= 0 - bonding device (value is slave PF index)
+        */
+       int bd = -1;
+       struct mlx5_dev_spawn_data *list = NULL;
        struct mlx5_dev_config dev_config;
        int ret;
 
@@ -2134,15 +2572,40 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
         * matching ones, gathering into the list.
         */
        struct ibv_device *ibv_match[ret + 1];
-       int nl_route = -1;
-       int nl_rdma = -1;
+       int nl_route = mlx5_nl_init(NETLINK_ROUTE);
+       int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
        unsigned int i;
 
        while (ret-- > 0) {
                struct rte_pci_addr pci_addr;
 
                DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
-               if (mlx5_ibv_device_to_pci_addr(ibv_list[ret], &pci_addr))
+               bd = mlx5_device_bond_pci_match
+                               (ibv_list[ret], pci_dev, nl_rdma);
+               if (bd >= 0) {
+                       /*
+                        * Bonding device detected. Only one match is
+                        * allowed: bonding is supported over a multi-port
+                        * IB device, so there should be no matches on
+                        * representor PCI functions or non VF LAG bonding
+                        * devices with the specified address.
+                        */
+                       if (nd) {
+                               DRV_LOG(ERR,
+                                       "multiple PCI match on bonding device"
+                                       "\"%s\" found", ibv_list[ret]->name);
+                               rte_errno = ENOENT;
+                               ret = -rte_errno;
+                               goto exit;
+                       }
+                       DRV_LOG(INFO, "PCI information matches for"
+                                     " slave %d bonding device \"%s\"",
+                                     bd, ibv_list[ret]->name);
+                       ibv_match[nd++] = ibv_list[ret];
+                       break;
+               }
+               if (mlx5_dev_to_pci_addr
+                       (ibv_list[ret]->ibdev_path, &pci_addr))
                        continue;
                if (pci_dev->addr.domain != pci_addr.domain ||
                    pci_dev->addr.bus != pci_addr.bus ||
@@ -2156,7 +2619,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        ibv_match[nd] = NULL;
        if (!nd) {
                /* No device matches, just complain and bail out. */
-               mlx5_glue->free_device_list(ibv_list);
                DRV_LOG(WARNING,
                        "no Verbs device matches PCI device " PCI_PRI_FMT ","
                        " are kernel drivers loaded?",
@@ -2164,10 +2626,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                        pci_dev->addr.devid, pci_dev->addr.function);
                rte_errno = ENOENT;
                ret = -rte_errno;
-               return ret;
+               goto exit;
        }
-       nl_route = mlx5_nl_init(NETLINK_ROUTE);
-       nl_rdma = mlx5_nl_init(NETLINK_RDMA);
        if (nd == 1) {
                /*
                 * Found single matching device may have multiple ports.
@@ -2179,14 +2639,42 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                if (!np)
                        DRV_LOG(WARNING, "can not get IB device \"%s\""
                                         " ports number", ibv_match[0]->name);
+               if (bd >= 0 && !np) {
+                       DRV_LOG(ERR, "can not get ports"
+                                    " for bonding device");
+                       rte_errno = ENOENT;
+                       ret = -rte_errno;
+                       goto exit;
+               }
        }
+#ifndef HAVE_MLX5DV_DR_DEVX_PORT
+       if (bd >= 0) {
+               /*
+                * This may happen if there is VF LAG kernel support and
+                * application is compiled with older rdma_core library.
+                */
+               DRV_LOG(ERR,
+                       "No kernel/verbs support for VF LAG bonding found.");
+               rte_errno = ENOTSUP;
+               ret = -rte_errno;
+               goto exit;
+       }
+#endif
        /*
         * Now we can determine the maximal
         * amount of devices to be spawned.
         */
-       struct mlx5_dev_spawn_data list[np ? np : nd];
-
-       if (np > 1) {
+       list = rte_zmalloc("device spawn data",
+                        sizeof(struct mlx5_dev_spawn_data) *
+                        (np ? np : nd),
+                        RTE_CACHE_LINE_SIZE);
+       if (!list) {
+               DRV_LOG(ERR, "spawn data array allocation failure");
+               rte_errno = ENOMEM;
+               ret = -rte_errno;
+               goto exit;
+       }
+       if (bd >= 0 || np > 1) {
                /*
                 * Single IB device with multiple ports found,
                 * it may be E-Switch master device and representors.
@@ -2195,12 +2683,14 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                assert(nl_rdma >= 0);
                assert(ns == 0);
                assert(nd == 1);
+               assert(np);
                for (i = 1; i <= np; ++i) {
                        list[ns].max_port = np;
                        list[ns].ibv_port = i;
                        list[ns].ibv_dev = ibv_match[0];
                        list[ns].eth_dev = NULL;
                        list[ns].pci_dev = pci_dev;
+                       list[ns].pf_bond = bd;
                        list[ns].ifindex = mlx5_nl_ifindex
                                        (nl_rdma, list[ns].ibv_dev->name, i);
                        if (!list[ns].ifindex) {
@@ -2230,6 +2720,21 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                                                (list[ns].ifindex,
                                                 &list[ns].info);
                        }
+                       if (!ret && bd >= 0) {
+                               switch (list[ns].info.name_type) {
+                               case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
+                                       if (list[ns].info.port_name == bd)
+                                               ns++;
+                                       break;
+                               case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
+                                       if (list[ns].info.pf_num == bd)
+                                               ns++;
+                                       break;
+                               default:
+                                       break;
+                               }
+                               continue;
+                       }
                        if (!ret && (list[ns].info.representor ^
                                     list[ns].info.master))
                                ns++;
@@ -2268,6 +2773,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                        list[ns].ibv_dev = ibv_match[i];
                        list[ns].eth_dev = NULL;
                        list[ns].pci_dev = pci_dev;
+                       list[ns].pf_bond = -1;
                        list[ns].ifindex = 0;
                        if (nl_rdma >= 0)
                                list[ns].ifindex = mlx5_nl_ifindex
@@ -2383,6 +2889,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
+       case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
+       case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
+       case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF:
                dev_config.vf = 1;
                break;
        default:
@@ -2404,6 +2913,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
                /* Restore non-PCI flags cleared by the above call. */
                list[i].eth_dev->data->dev_flags |= restore;
+               mlx5_dev_interrupt_handler_devx_install(list[i].eth_dev);
                rte_eth_dev_probing_finish(list[i].eth_dev);
        }
        if (i != ns) {
@@ -2432,17 +2942,55 @@ exit:
        /*
         * Do the routine cleanup:
         * - close opened Netlink sockets
+        * - free allocated spawn data array
         * - free the Infiniband device list
         */
        if (nl_rdma >= 0)
                close(nl_rdma);
        if (nl_route >= 0)
                close(nl_route);
+       if (list)
+               rte_free(list);
        assert(ibv_list);
        mlx5_glue->free_device_list(ibv_list);
        return ret;
 }
 
+/**
+ * Look for the Ethernet device belonging to the mlx5 driver.
+ *
+ * @param[in] port_id
+ *   port_id to start looking for the device.
+ * @param[in] pci_dev
+ *   Pointer to the hint PCI device. When a device is being probed,
+ *   its siblings (master and preceding representors) might not have
+ *   a driver assigned yet, because mlx5_pci_probe() is not completed;
+ *   in this case, matching on the hint PCI device can be used to
+ *   detect the sibling device.
+ *
+ * @return
+ *   port_id of the found device, RTE_MAX_ETHPORTS if not found.
+ */
+uint16_t
+mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev)
+{
+       while (port_id < RTE_MAX_ETHPORTS) {
+               struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+               if (dev->state != RTE_ETH_DEV_UNUSED &&
+                   dev->device &&
+                   (dev->device == &pci_dev->device ||
+                    (dev->device->driver &&
+                    dev->device->driver->name &&
+                    !strcmp(dev->device->driver->name, MLX5_DRIVER_NAME))))
+                       break;
+               port_id++;
+       }
+       if (port_id >= RTE_MAX_ETHPORTS)
+               return RTE_MAX_ETHPORTS;
+       return port_id;
+}
+
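This helper pairs with the MLX5_ETH_FOREACH_DEV() iterator used throughout the patch; the macro itself lives in mlx5.h and is not shown in this hunk, but a sketch of how such an iterator would be built on top of mlx5_eth_find_next() (assumed shape, not the verbatim definition):

    #define MLX5_ETH_FOREACH_DEV(port_id, pci_dev) \
            for (port_id = mlx5_eth_find_next(0, pci_dev); \
                 port_id < RTE_MAX_ETHPORTS; \
                 port_id = mlx5_eth_find_next(port_id + 1, pci_dev))
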
 /**
  * DPDK callback to remove a PCI device.
  *
@@ -2513,6 +3061,14 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                                PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
        },
+       {
+               RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+                               PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
+       },
+       {
+               RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+                               PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
+       },
        {
                .vendor_id = 0
        }