net/mlx5: support UDP tunnel adding
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index a3a9013..dc7b10b 100644
@@ -269,6 +269,37 @@ mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh)
        memset(&sh->cmng, 0, sizeof(sh->cmng));
 }
 
+/**
+ * Extract the pdn (PD number) of a PD object using the DV API.
+ *
+ * @param[in] pd
+ *   Pointer to the verbs PD object.
+ * @param[out] pdn
+ *   Pointer to the PD object number variable.
+ *
+ * @return
+ *   0 on success, error value otherwise.
+ */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+static int
+mlx5_get_pdn(struct ibv_pd *pd, uint32_t *pdn)
+{
+       struct mlx5dv_obj obj;
+       struct mlx5dv_pd pd_info;
+       int ret = 0;
+
+       obj.pd.in = pd;
+       obj.pd.out = &pd_info;
+       ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
+       if (ret) {
+               DRV_LOG(DEBUG, "Failed to get PD object info");
+               return ret;
+       }
+       *pdn = pd_info.pdn;
+       return 0;
+}
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+
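For reference, the glue call above resolves to rdma-core's mlx5dv_init_obj(). A minimal standalone sketch of the same pdn lookup, assuming direct linkage against libmlx5 instead of the PMD's dlopen-based glue layer (the helper name is illustrative):

#include <infiniband/mlx5dv.h>

/* Sketch: query the PD number straight from rdma-core. The PMD routes
 * the same call through mlx5_glue so that libmlx5 can be loaded at run
 * time rather than being a hard link-time dependency. */
static int
get_pdn_direct(struct ibv_pd *pd, uint32_t *pdn)
{
	struct mlx5dv_pd pd_info;
	struct mlx5dv_obj obj = {
		.pd = { .in = pd, .out = &pd_info },
	};
	int ret;

	ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret)
		return ret;
	*pdn = pd_info.pdn;
	return 0;
}
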
 /**
  * Allocate shared IB device context. If there is multiport device the
  * master and representors will share this context, if there is single
@@ -357,6 +388,13 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
                err = ENOMEM;
                goto error;
        }
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       err = mlx5_get_pdn(sh->pd, &sh->pdn);
+       if (err) {
+               DRV_LOG(ERR, "Failed to extract pdn from PD");
+               goto error;
+       }
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
        /*
         * Once the device is added to the list of memory event
         * callback, its global MR cache table cannot be expanded
@@ -374,6 +412,12 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
                goto error;
        }
        mlx5_flow_counters_mng_init(sh);
+       /* Add device to memory callback list. */
+       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+       LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
+                        sh, mem_event_cb);
+       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+       /* Add context to the global device list. */
        LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
 exit:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
@@ -423,6 +467,11 @@ mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
                goto exit;
        /* Release created Memory Regions. */
        mlx5_mr_release(sh);
+       /* Remove from memory callback device list. */
+       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+       LIST_REMOVE(sh, mem_event_cb);
+       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+       /* Remove context from the global device list. */
        LIST_REMOVE(sh, next);
        /*
         *  Ensure there is no async event handler installed.
@@ -695,6 +744,31 @@ mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
        rte_free(ptr);
 }
 
+/**
+ * DPDK callback to add a UDP tunnel port.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] udp_tunnel
+ *   Pointer to the UDP tunnel structure.
+ *
+ * @return
+ *   0 on valid UDP ports and tunnels, -ENOTSUP otherwise.
+ */
+int
+mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
+                        struct rte_eth_udp_tunnel *udp_tunnel)
+{
+       assert(udp_tunnel != NULL);
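+       /* Only the IANA-assigned default UDP ports are recognized. */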
+       if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+           udp_tunnel->udp_port == 4789)
+               return 0;
+       if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+           udp_tunnel->udp_port == 4790)
+               return 0;
+       return -ENOTSUP;
+}
+
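The new callback is reached through the generic ethdev API. A minimal usage sketch (the helper name and port_id are illustrative, not part of this patch):

#include <rte_ethdev.h>

/* Sketch: ask the PMD to recognize the IANA-assigned VXLAN port.
 * On mlx5 this dispatches to mlx5_udp_tunnel_port_add() above, which
 * accepts only 4789 (VXLAN) and 4790 (VXLAN-GPE). */
static int
register_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
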
 /**
  * Initialize process private data structure.
  *
@@ -789,11 +863,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        }
        mlx5_proc_priv_uninit(dev);
        mlx5_mprq_free_mp(dev);
-       /* Remove from memory callback device list. */
-       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-       assert(priv->sh);
-       LIST_REMOVE(priv->sh, mem_event_cb);
-       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        mlx5_free_shared_dr(priv);
        if (priv->rss_conf.rss_key != NULL)
                rte_free(priv->rss_conf.rss_key);
@@ -805,6 +874,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                close(priv->nl_socket_route);
        if (priv->nl_socket_rdma >= 0)
                close(priv->nl_socket_rdma);
+       if (priv->vmwa_context)
+               mlx5_vlan_vmwa_exit(priv->vmwa_context);
        if (priv->sh) {
                /*
                 * Free the shared context in last turn, because the cleanup
@@ -815,7 +886,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                mlx5_free_shared_ibctx(priv->sh);
                priv->sh = NULL;
        }
-       ret = mlx5_hrxq_ibv_verify(dev);
+       ret = mlx5_hrxq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some hash Rx queues still remain",
                        dev->data->port_id);
@@ -916,6 +987,7 @@ const struct eth_dev_ops mlx5_dev_ops = {
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
+       .udp_tunnel_port_add = mlx5_udp_tunnel_port_add,
 };
 
 /* Available operations from secondary process. */
@@ -1215,8 +1287,6 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
                switch (spawn->pci_dev->id.device_id) {
                case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
                case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
-               case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
-               case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
                        if (config->txq_inline_min <
                                       (int)MLX5_INLINE_HSIZE_L2) {
                                DRV_LOG(DEBUG,
@@ -1302,6 +1372,7 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
                 * and PFC control may be broken, so disable feature.
                 */
                config->hw_vlan_insert = 0;
+               config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
                break;
        default:
                config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
@@ -1422,6 +1493,7 @@ mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset)
                rte_free(page);
        } else {
                /* Mark in bitmap that this door-bell is not in use. */
+               offset /= MLX5_DBR_SIZE;
                int i = offset / 64;
                int j = offset % 64;
 
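The added division converts the byte offset of a door-bell record into a record index before the split into bitmap word and bit position. A worked sketch of the math, assuming MLX5_DBR_SIZE is 64 bytes (the helper name is illustrative):

#include <stdint.h>

/* Sketch: a byte offset becomes a record index, which then splits into
 * a 64-bit bitmap word (i) and a bit position (j) inside that word. */
static void
mark_dbr_free(uint64_t *bitmap, uint64_t offset)
{
	offset /= 64;                 /* record index; MLX5_DBR_SIZE assumed 64 */
	int i = offset / 64;          /* which 64-bit word of the bitmap */
	int j = offset % 64;          /* which bit inside that word */

	bitmap[i] &= ~(UINT64_C(1) << j);  /* record no longer in use */
}

For example, byte offset 192 maps to record 3, i.e. word 0, bit 3.
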
@@ -1817,7 +1889,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                if (priv->counter_fallback)
                        DRV_LOG(INFO, "Use fall-back DV counter management");
                /* Check for LRO support. */
-               if (config.dest_tir && mprq && config.hca_attr.lro_cap) {
+               if (config.dest_tir && config.hca_attr.lro_cap) {
                        /* TBD check tunnel lro caps. */
                        config.lro.supported = config.hca_attr.lro_cap;
                        DRV_LOG(DEBUG, "Device supports LRO");
@@ -1830,8 +1902,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                                config.hca_attr.lro_timer_supported_periods[0];
                        DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
                                config.lro.timeout);
-                       config.mprq.enabled = 1;
-                       DRV_LOG(DEBUG, "Enable MPRQ for LRO use");
                }
        }
        if (config.mprq.enabled && mprq) {
@@ -1952,6 +2022,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        mlx5_set_min_inline(spawn, &config);
        /* Store device configuration on private structure. */
        priv->config = config;
+       /* Create context for virtual machine VLAN workaround. */
+       priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
        if (config.dv_flow_en) {
                err = mlx5_alloc_shared_dr(priv);
                if (err)
@@ -1964,11 +2036,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                goto error;
        }
        priv->config.flow_prio = err;
-       /* Add device to memory callback list. */
-       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-       LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
-                        sh, mem_event_cb);
-       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        return eth_dev;
 error:
        if (priv) {
@@ -1978,6 +2045,8 @@ error:
                        close(priv->nl_socket_route);
                if (priv->nl_socket_rdma >= 0)
                        close(priv->nl_socket_rdma);
+               if (priv->vmwa_context)
+                       mlx5_vlan_vmwa_exit(priv->vmwa_context);
                if (own_domain_id)
                        claim_zero(rte_eth_switch_domain_free(priv->domain_id));
                rte_free(priv);