net/mlx5: move backing PCI device to private context
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 617f5ee..c33e49d 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -32,7 +32,6 @@
 #include <rte_bus_pci.h>
 #include <rte_common.h>
 #include <rte_config.h>
-#include <rte_eal_memconfig.h>
 #include <rte_kvargs.h>
 #include <rte_rwlock.h>
 #include <rte_spinlock.h>
@@ -373,7 +372,6 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
                sizeof(sh->ibdev_name));
        strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path,
                sizeof(sh->ibdev_path));
-       sh->pci_dev = spawn->pci_dev;
        pthread_mutex_init(&sh->intr_mutex, NULL);
        /*
         * Setting port_id to max unallowed value means
@@ -406,12 +404,18 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
         */
        err = mlx5_mr_btree_init(&sh->mr.cache,
                                 MLX5_MR_BTREE_CACHE_N * 2,
-                                sh->pci_dev->device.numa_node);
+                                spawn->pci_dev->device.numa_node);
        if (err) {
                err = rte_errno;
                goto error;
        }
        mlx5_flow_counters_mng_init(sh);
+       /* Add device to memory callback list. */
+       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+       LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
+                        sh, mem_event_cb);
+       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+       /* Add context to the global device list. */
        LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
 exit:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
@@ -461,6 +465,11 @@ mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
                goto exit;
        /* Release created Memory Regions. */
        mlx5_mr_release(sh);
+       /* Remove from memory callback device list. */
+       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+       LIST_REMOVE(sh, mem_event_cb);
+       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+       /* Remove context from the global device list. */
        LIST_REMOVE(sh, next);
        /*
         *  Ensure there is no async event handler installed.
@@ -538,6 +547,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
                sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
        }
 #endif
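+       /* The pop VLAN action is shared by the ports of the device. */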
+       sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
        sh->dv_refcnt++;
        priv->dr_shared = 1;
        return 0;
@@ -560,6 +570,10 @@ error:
                mlx5_glue->destroy_flow_action(sh->esw_drop_action);
                sh->esw_drop_action = NULL;
        }
+       if (sh->pop_vlan_action) {
+               mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
+               sh->pop_vlan_action = NULL;
+       }
        return err;
 #else
        (void)priv;
@@ -605,6 +619,10 @@ mlx5_free_shared_dr(struct mlx5_priv *priv)
                sh->esw_drop_action = NULL;
        }
 #endif
+       if (sh->pop_vlan_action) {
+               mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
+               sh->pop_vlan_action = NULL;
+       }
        pthread_mutex_destroy(&sh->dv_mutex);
 #else
        (void)priv;
@@ -733,6 +751,31 @@ mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
        rte_free(ptr);
 }
 
+/**
+ * DPDK callback to add a UDP tunnel port.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device structure.
+ * @param[in] udp_tunnel
+ *   Pointer to the UDP tunnel configuration.
+ *
+ * @return
+ *   0 when the UDP port matches the default port for the tunnel type,
+ *   -ENOTSUP otherwise.
+ */
+int
+mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
+                        struct rte_eth_udp_tunnel *udp_tunnel)
+{
+       assert(udp_tunnel != NULL);
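+       /* 4789 and 4790 are the IANA-assigned VXLAN and VXLAN-GPE ports. */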
+       if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+           udp_tunnel->udp_port == 4789)
+               return 0;
+       if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+           udp_tunnel->udp_port == 4790)
+               return 0;
+       return -ENOTSUP;
+}
+
 /**
  * Initialize process private data structure.
  *
@@ -827,11 +870,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        }
        mlx5_proc_priv_uninit(dev);
        mlx5_mprq_free_mp(dev);
-       /* Remove from memory callback device list. */
-       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-       assert(priv->sh);
-       LIST_REMOVE(priv->sh, mem_event_cb);
-       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        mlx5_free_shared_dr(priv);
        if (priv->rss_conf.rss_key != NULL)
                rte_free(priv->rss_conf.rss_key);
@@ -843,6 +881,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                close(priv->nl_socket_route);
        if (priv->nl_socket_rdma >= 0)
                close(priv->nl_socket_rdma);
+       if (priv->vmwa_context)
+               mlx5_vlan_vmwa_exit(priv->vmwa_context);
        if (priv->sh) {
                /*
                 * Free the shared context in last turn, because the cleanup
@@ -954,6 +994,9 @@ const struct eth_dev_ops mlx5_dev_ops = {
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
+       .udp_tunnel_port_add  = mlx5_udp_tunnel_port_add,
+       .get_module_info = mlx5_get_module_info,
+       .get_module_eeprom = mlx5_get_module_eeprom,
 };
 
 /* Available operations from secondary process. */
@@ -967,6 +1010,8 @@ static const struct eth_dev_ops mlx5_dev_sec_ops = {
        .dev_infos_get = mlx5_dev_infos_get,
        .rx_descriptor_status = mlx5_rx_descriptor_status,
        .tx_descriptor_status = mlx5_tx_descriptor_status,
+       .get_module_info = mlx5_get_module_info,
+       .get_module_eeprom = mlx5_get_module_eeprom,
 };
 
 /* Available operations in flow isolated mode. */
@@ -1010,6 +1055,8 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
+       .get_module_info = mlx5_get_module_info,
+       .get_module_eeprom = mlx5_get_module_eeprom,
 };
 
 /**
@@ -1253,8 +1300,6 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
                switch (spawn->pci_dev->id.device_id) {
                case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
                case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
-               case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
-               case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
                        if (config->txq_inline_min <
                                       (int)MLX5_INLINE_HSIZE_L2) {
                                DRV_LOG(DEBUG,
@@ -1340,6 +1385,7 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
                 * and PFC control may be broken, so disable feature.
                 */
                config->hw_vlan_insert = 0;
+               config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
                break;
        default:
                config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
@@ -1460,6 +1506,7 @@ mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset)
                rte_free(page);
        } else {
                /* Mark in bitmap that this door-bell is not in use. */
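+               /* The stored offset is in bytes, convert it to an index. */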
+               offset /= MLX5_DBR_SIZE;
                int i = offset / 64;
                int j = offset % 64;
 
@@ -1707,6 +1754,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        }
        priv->sh = sh;
        priv->ibv_port = spawn->ibv_port;
+       priv->pci_dev = spawn->pci_dev;
        priv->mtu = RTE_ETHER_MTU;
 #ifndef RTE_ARCH_64
        /* Initialize UAR access locks for 32bit implementations. */
@@ -1855,7 +1903,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                if (priv->counter_fallback)
                        DRV_LOG(INFO, "Use fall-back DV counter management\n");
                /* Check for LRO support. */
-               if (config.dest_tir && mprq && config.hca_attr.lro_cap) {
+               if (config.dest_tir && config.hca_attr.lro_cap) {
                        /* TBD check tunnel lro caps. */
                        config.lro.supported = config.hca_attr.lro_cap;
                        DRV_LOG(DEBUG, "Device supports LRO");
@@ -1868,8 +1916,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                                config.hca_attr.lro_timer_supported_periods[0];
                        DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
                                config.lro.timeout);
-                       config.mprq.enabled = 1;
-                       DRV_LOG(DEBUG, "Enable MPRQ for LRO use");
                }
        }
        if (config.mprq.enabled && mprq) {
@@ -1990,6 +2036,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        mlx5_set_min_inline(spawn, &config);
        /* Store device configuration on private structure. */
        priv->config = config;
+       /* Create context for virtual machine VLAN workaround. */
+       priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
        if (config.dv_flow_en) {
                err = mlx5_alloc_shared_dr(priv);
                if (err)
@@ -2002,11 +2050,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                goto error;
        }
        priv->config.flow_prio = err;
-       /* Add device to memory callback list. */
-       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-       LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
-                        sh, mem_event_cb);
-       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        return eth_dev;
 error:
        if (priv) {
@@ -2016,6 +2059,8 @@ error:
                        close(priv->nl_socket_route);
                if (priv->nl_socket_rdma >= 0)
                        close(priv->nl_socket_rdma);
+               if (priv->vmwa_context)
+                       mlx5_vlan_vmwa_exit(priv->vmwa_context);
                if (own_domain_id)
                        claim_zero(rte_eth_switch_domain_free(priv->domain_id));
                rte_free(priv);
@@ -2379,6 +2424,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
+       case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
+       case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
                dev_config.vf = 1;
                break;
        default: