net/mlx5: make flow list thread safe
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 40f9446..a579dde 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -291,6 +291,12 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
                sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
        }
 #endif
+       if (!sh->tunnel_hub)
+               err = mlx5_alloc_tunnel_hub(sh);
+       if (err) {
+               DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
+               goto error;
+       }
        if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
                mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
                mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
@@ -335,6 +341,10 @@ error:
                mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
                sh->tag_table = NULL;
        }
+       if (sh->tunnel_hub) {
+               mlx5_release_tunnel_hub(sh, priv->dev_port);
+               sh->tunnel_hub = NULL;
+       }
        mlx5_free_table_hash_list(priv);
        return err;
 }
@@ -391,6 +401,10 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv)
                mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
                sh->tag_table = NULL;
        }
+       if (sh->tunnel_hub) {
+               mlx5_release_tunnel_hub(sh, priv->dev_port);
+               sh->tunnel_hub = NULL;
+       }
        mlx5_free_table_hash_list(priv);
 }
 
@@ -733,6 +747,10 @@ err_secondary:
                        strerror(rte_errno));
                goto error;
        }
+       if (config->dv_miss_info) {
+               if (switch_info->master || switch_info->representor)
+                       config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
+       }
        mlx5_malloc_mem_select(config->sys_mem_en);
        sh = mlx5_alloc_shared_dev_ctx(spawn, config);
        if (!sh)
@@ -1340,6 +1358,7 @@ err_secondary:
                                      MLX5_MAX_MAC_ADDRESSES);
        priv->flows = 0;
        priv->ctrl_flows = 0;
+       rte_spinlock_init(&priv->flow_list_lock);
        TAILQ_INIT(&priv->flow_meters);
        TAILQ_INIT(&priv->flow_meter_profiles);
        /* Hint libmlx5 to use PMD allocator for data plane resources */
@@ -1377,17 +1396,6 @@ err_secondary:
                err = mlx5_alloc_shared_dr(priv);
                if (err)
                        goto error;
-               /*
-                * RSS id is shared with meter flow id. Meter flow id can only
-                * use the 24 MSB of the register.
-                */
-               priv->qrss_id_pool = mlx5_flow_id_pool_alloc(UINT32_MAX >>
-                                    MLX5_MTR_COLOR_BITS);
-               if (!priv->qrss_id_pool) {
-                       DRV_LOG(ERR, "can't create flow id pool");
-                       err = ENOMEM;
-                       goto error;
-               }
        }
        if (config->devx && config->dv_flow_en && config->dest_tir) {
                priv->obj_ops = devx_obj_ops;
@@ -1431,11 +1439,6 @@ err_secondary:
                        err = ENOTSUP;
                        goto error;
        }
-       /*
-        * Allocate the buffer for flow creating, just once.
-        * The allocation must be done before any flow creating.
-        */
-       mlx5_flow_alloc_intermediate(eth_dev);
        /* Query availability of metadata reg_c's. */
        err = mlx5_flow_discover_mreg_c(eth_dev);
        if (err < 0) {
@@ -1479,8 +1482,6 @@ error:
                        close(priv->nl_socket_rdma);
                if (priv->vmwa_context)
                        mlx5_vlan_vmwa_exit(priv->vmwa_context);
-               if (priv->qrss_id_pool)
-                       mlx5_flow_id_pool_release(priv->qrss_id_pool);
                if (own_domain_id)
                        claim_zero(rte_eth_switch_domain_free(priv->domain_id));
                mlx5_free(priv);
@@ -2552,6 +2553,12 @@ const struct eth_dev_ops mlx5_os_dev_ops = {
        .get_module_eeprom = mlx5_get_module_eeprom,
        .hairpin_cap_get = mlx5_hairpin_cap_get,
        .mtr_ops_get = mlx5_flow_meter_ops_get,
+       .hairpin_bind = mlx5_hairpin_bind,
+       .hairpin_unbind = mlx5_hairpin_unbind,
+       .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
+       .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
+       .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
+       .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
 };
 
 /* Available operations from secondary process. */
@@ -2630,4 +2637,10 @@ const struct eth_dev_ops mlx5_os_dev_ops_isolate = {
        .get_module_eeprom = mlx5_get_module_eeprom,
        .hairpin_cap_get = mlx5_hairpin_cap_get,
        .mtr_ops_get = mlx5_flow_meter_ops_get,
+       .hairpin_bind = mlx5_hairpin_bind,
+       .hairpin_unbind = mlx5_hairpin_unbind,
+       .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
+       .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
+       .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
+       .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
 };
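
The six new callbacks plug the PMD into the two-port hairpin control path that ethdev exposes to applications: rte_eth_hairpin_bind() and rte_eth_hairpin_unbind() dispatch to mlx5_hairpin_bind()/mlx5_hairpin_unbind() through these eth_dev_ops entries. A hedged application-side sketch follows, assuming the DPDK 20.11 prototypes int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port) and int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port); the helper name is made up for illustration.

#include <rte_ethdev.h>

/* Bind the Tx hairpin queues of tx_port to the Rx hairpin queues of rx_port,
 * then the reverse direction for a bidirectional pair. Both ports must already
 * have hairpin queues configured; each call reaches mlx5_hairpin_bind() via
 * the dev_ops table extended above.
 */
static int
bind_hairpin_pair(uint16_t tx_port, uint16_t rx_port)
{
	int ret;

	ret = rte_eth_hairpin_bind(tx_port, rx_port);
	if (ret != 0)
		return ret;
	ret = rte_eth_hairpin_bind(rx_port, tx_port);
	if (ret != 0)
		rte_eth_hairpin_unbind(tx_port, rx_port); /* roll back first bind */
	return ret;
}
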