net/mlx5: update PCI address retrieving routine
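The probe loop near the end of this patch now derives the PCI address of each Verbs device from its sysfs path (ibdev_path) via mlx5_dev_to_pci_addr() instead of handing the ibv_device object to mlx5_ibv_device_to_pci_addr(). As a rough illustration of what such a sysfs-based lookup can look like, here is a minimal sketch, assuming the kernel exposes the address through the device's uevent file; the helper name and details below are hypothetical, not the PMD's actual code:

#include <errno.h>
#include <limits.h>
#include <stdio.h>

#include <rte_pci.h>

/*
 * Hypothetical sketch, not the PMD's helper: recover the PCI address of a
 * Verbs device from "<ibdev_path>/device/uevent" by parsing the
 * PCI_SLOT_NAME line (e.g. "PCI_SLOT_NAME=0000:03:00.0").
 */
static int
example_dev_to_pci_addr(const char *dev_path, struct rte_pci_addr *pci_addr)
{
	char path[PATH_MAX];
	char line[256];
	FILE *file;
	int found = 0;

	snprintf(path, sizeof(path), "%s/device/uevent", dev_path);
	file = fopen(path, "rb");
	if (file == NULL)
		return -errno;
	while (fgets(line, sizeof(line), file) != NULL) {
		unsigned int dom, bus, dev, func;

		if (sscanf(line, "PCI_SLOT_NAME=%x:%x:%x.%x",
			   &dom, &bus, &dev, &func) == 4) {
			pci_addr->domain = dom;
			pci_addr->bus = bus;
			pci_addr->devid = dev;
			pci_addr->function = func;
			found = 1;
			break;
		}
	}
	fclose(file);
	return found ? 0 : -ENOENT;
}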
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 909c22e..dcb22b2 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -32,7 +32,6 @@
 #include <rte_bus_pci.h>
 #include <rte_common.h>
 #include <rte_config.h>
-#include <rte_eal_memconfig.h>
 #include <rte_kvargs.h>
 #include <rte_rwlock.h>
 #include <rte_spinlock.h>
@@ -373,7 +372,6 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
                sizeof(sh->ibdev_name));
        strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path,
                sizeof(sh->ibdev_path));
-       sh->pci_dev = spawn->pci_dev;
        pthread_mutex_init(&sh->intr_mutex, NULL);
        /*
         * Setting port_id to max unallowed value means
@@ -406,12 +404,18 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
         */
        err = mlx5_mr_btree_init(&sh->mr.cache,
                                 MLX5_MR_BTREE_CACHE_N * 2,
-                                sh->pci_dev->device.numa_node);
+                                spawn->pci_dev->device.numa_node);
        if (err) {
                err = rte_errno;
                goto error;
        }
        mlx5_flow_counters_mng_init(sh);
+       /* Add device to memory callback list. */
+       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+       LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
+                        sh, mem_event_cb);
+       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+       /* Add context to the global device list. */
        LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
 exit:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
@@ -461,6 +465,11 @@ mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
                goto exit;
        /* Release created Memory Regions. */
        mlx5_mr_release(sh);
+       /* Remove from memory callback device list. */
+       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+       LIST_REMOVE(sh, mem_event_cb);
+       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+       /* Remove context from the global device list. */
        LIST_REMOVE(sh, next);
        /*
         *  Ensure there is no async event handler installed.
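With the two hunks above, membership on the global memory event callback list now follows the shared IB context: the context is linked into mlx5_shared_data->mem_event_cb_list when it is allocated and unlinked again when it is freed, both under the mem_event_rwlock write lock. The matching per-port registration is removed from mlx5_dev_spawn() and mlx5_dev_close() further down in this patch.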
@@ -538,6 +547,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
                sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
        }
 #endif
+       sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
        sh->dv_refcnt++;
        priv->dr_shared = 1;
        return 0;
@@ -560,6 +570,10 @@ error:
                mlx5_glue->destroy_flow_action(sh->esw_drop_action);
                sh->esw_drop_action = NULL;
        }
+       if (sh->pop_vlan_action) {
+               mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
+               sh->pop_vlan_action = NULL;
+       }
        return err;
 #else
        (void)priv;
@@ -605,6 +619,10 @@ mlx5_free_shared_dr(struct mlx5_priv *priv)
                sh->esw_drop_action = NULL;
        }
 #endif
+       if (sh->pop_vlan_action) {
+               mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
+               sh->pop_vlan_action = NULL;
+       }
        pthread_mutex_destroy(&sh->dv_mutex);
 #else
        (void)priv;
@@ -733,6 +751,31 @@ mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
        rte_free(ptr);
 }
 
+/**
+ * DPDK callback to add a UDP tunnel port.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] udp_tunnel
+ *   Pointer to the UDP tunnel description.
+ *
+ * @return
+ *   0 if the UDP port/tunnel type pair is supported, -ENOTSUP otherwise.
+ */
+int
+mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
+                        struct rte_eth_udp_tunnel *udp_tunnel)
+{
+       assert(udp_tunnel != NULL);
+       if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+           udp_tunnel->udp_port == 4789)
+               return 0;
+       if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+           udp_tunnel->udp_port == 4790)
+               return 0;
+       return -ENOTSUP;
+}
+
 /**
  * Initialize process private data structure.
  *
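The callback above plugs mlx5 into the generic ethdev UDP tunnel port API, but only acknowledges the IANA default ports the NIC already parses (4789 for VXLAN, 4790 for VXLAN-GPE) and rejects any other value with -ENOTSUP. From the application side nothing changes; a minimal sketch, assuming port_id is a configured mlx5 port (the helper name is illustrative):

#include <rte_ethdev.h>

/*
 * Minimal sketch: register the default VXLAN UDP port on a port driven by
 * this PMD. With the callback above, only 4789 (VXLAN) and 4790 (VXLAN-GPE)
 * return 0; any other port number yields -ENOTSUP.
 */
static int
example_add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}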
@@ -827,11 +870,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        }
        mlx5_proc_priv_uninit(dev);
        mlx5_mprq_free_mp(dev);
-       /* Remove from memory callback device list. */
-       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-       assert(priv->sh);
-       LIST_REMOVE(priv->sh, mem_event_cb);
-       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        mlx5_free_shared_dr(priv);
        if (priv->rss_conf.rss_key != NULL)
                rte_free(priv->rss_conf.rss_key);
@@ -956,6 +994,9 @@ const struct eth_dev_ops mlx5_dev_ops = {
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
+       .udp_tunnel_port_add  = mlx5_udp_tunnel_port_add,
+       .get_module_info = mlx5_get_module_info,
+       .get_module_eeprom = mlx5_get_module_eeprom,
 };
 
 /* Available operations from secondary process. */
@@ -969,6 +1010,8 @@ static const struct eth_dev_ops mlx5_dev_sec_ops = {
        .dev_infos_get = mlx5_dev_infos_get,
        .rx_descriptor_status = mlx5_rx_descriptor_status,
        .tx_descriptor_status = mlx5_tx_descriptor_status,
+       .get_module_info = mlx5_get_module_info,
+       .get_module_eeprom = mlx5_get_module_eeprom,
 };
 
 /* Available operations in flow isolated mode. */
@@ -1012,6 +1055,8 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
+       .get_module_info = mlx5_get_module_info,
+       .get_module_eeprom = mlx5_get_module_eeprom,
 };
 
 /**
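The get_module_info/get_module_eeprom handlers added to the primary, secondary and isolated-mode ops tables are reached through the generic ethdev module EEPROM calls. A minimal sketch of how an application might read a transceiver's EEPROM through these entry points, assuming port_id is an mlx5 port with a plugged module (the helper name is illustrative):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include <rte_ethdev.h>

/*
 * Minimal sketch: query the plugged module's EEPROM size and type, then
 * read the whole EEPROM into a heap buffer.
 */
static int
example_dump_module_eeprom(uint16_t port_id)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info eeprom;
	int ret;

	memset(&modinfo, 0, sizeof(modinfo));
	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;
	memset(&eeprom, 0, sizeof(eeprom));
	eeprom.length = modinfo.eeprom_len;
	eeprom.data = calloc(1, modinfo.eeprom_len);
	if (eeprom.data == NULL)
		return -ENOMEM;
	ret = rte_eth_dev_get_module_eeprom(port_id, &eeprom);
	/* On success eeprom.data[0..eeprom.length - 1] holds the contents. */
	free(eeprom.data);
	return ret;
}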
@@ -1325,12 +1370,9 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
        switch (spawn->pci_dev->id.device_id) {
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
-               config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
-               config->hw_vlan_insert = 0;
-               break;
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
-               config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
+               config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
                config->hw_vlan_insert = 0;
                break;
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
@@ -1712,6 +1754,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        }
        priv->sh = sh;
        priv->ibv_port = spawn->ibv_port;
+       priv->pci_dev = spawn->pci_dev;
        priv->mtu = RTE_ETHER_MTU;
 #ifndef RTE_ARCH_64
        /* Initialize UAR access locks for 32bit implementations. */
@@ -2007,11 +2050,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                goto error;
        }
        priv->config.flow_prio = err;
-       /* Add device to memory callback list. */
-       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-       LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
-                        sh, mem_event_cb);
-       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        return eth_dev;
 error:
        if (priv) {
@@ -2145,7 +2183,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_addr pci_addr;
 
                DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
-               if (mlx5_ibv_device_to_pci_addr(ibv_list[ret], &pci_addr))
+               if (mlx5_dev_to_pci_addr
+                       (ibv_list[ret]->ibdev_path, &pci_addr))
                        continue;
                if (pci_dev->addr.domain != pci_addr.domain ||
                    pci_dev->addr.bus != pci_addr.bus ||
@@ -2386,6 +2425,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
+       case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
+       case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
                dev_config.vf = 1;
                break;
        default: