net/mlx5: split multi-thread flow handling per OS
author Tal Shnaiderman <talshn@nvidia.com>
Thu, 7 Jan 2021 13:08:27 +0000 (15:08 +0200)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Wed, 13 Jan 2021 18:45:30 +0000 (19:45 +0100)
The multi-threaded flows feature uses the pthread function pthread_key_create,
but on Windows the destructor option of this function is not implemented.

To resolve this, the Windows code implements its own destruction mechanism to
clean up the mlx5_flow_workspace object of each terminated thread.

The Linux flow keeps the current behavior.
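
For reference, this is the per-OS interface the common flow code now calls
instead of the pthread primitives (a summary of the prototypes added to
mlx5_flow.h below, not part of the patch itself):

    /* Implemented separately in linux/mlx5_flow_os.c and windows/mlx5_flow_os.c. */
    int mlx5_flow_os_init_workspace_once(void);
    void *mlx5_flow_os_get_specific_workspace(void);
    int mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data);
    void mlx5_flow_os_release_workspace(void);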

Signed-off-by: Tal Shnaiderman <talshn@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Acked-by: Khoa To <khot@microsoft.com>
drivers/net/mlx5/linux/meson.build
drivers/net/mlx5/linux/mlx5_flow_os.c [new file with mode: 0644]
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/windows/mlx5_flow_os.c

index 6c44021..8412edc 100644 (file)
@@ -9,5 +9,6 @@ sources += files(
        'mlx5_verbs.c',
        'mlx5_mp_os.c',
        'mlx5_vlan_os.c',
+       'mlx5_flow_os.c',
 )
 
diff --git a/drivers/net/mlx5/linux/mlx5_flow_os.c b/drivers/net/mlx5/linux/mlx5_flow_os.c
new file mode 100644 (file)
index 0000000..732b1b2
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2020 Mellanox Technologies, Ltd
+ */
+
+#include "mlx5_flow_os.h"
+
+#include <rte_thread.h>
+
+/* Key of thread specific flow workspace data. */
+static rte_tls_key key_workspace;
+
+int
+mlx5_flow_os_init_workspace_once(void)
+{
+       if (rte_thread_tls_key_create(&key_workspace, flow_release_workspace)) {
+               DRV_LOG(ERR, "Can't create flow workspace data thread key.");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+void *
+mlx5_flow_os_get_specific_workspace(void)
+{
+       return rte_thread_tls_value_get(key_workspace);
+}
+
+int
+mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
+{
+       return rte_thread_tls_value_set(key_workspace, data);
+}
+
+void
+mlx5_flow_os_release_workspace(void)
+{
+       rte_thread_tls_key_delete(key_workspace);
+}
index 023ef50..e47ad6b 100644 (file)
@@ -1004,6 +1004,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                err = rte_errno;
                goto error;
        }
+       if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
+               err = mlx5_flow_os_init_workspace_once();
+               if (err)
+                       goto error;
+       }
        mlx5_flow_aging_init(sh);
        mlx5_flow_counters_mng_init(sh);
        mlx5_flow_ipool_create(sh, config);
@@ -1079,6 +1084,9 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
        mlx5_mr_release_cache(&sh->share_cache);
        /* Remove context from the global device list. */
        LIST_REMOVE(sh, next);
+       /* Release flow workspace objects on the last device. */
+       if (LIST_EMPTY(&mlx5_dev_ctx_list))
+               mlx5_flow_os_release_workspace();
        pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
        /*
         *  Ensure there is no async event handler installed.
index 2a4073c..9c2e090 100644 (file)
@@ -696,11 +696,6 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
        },
 };
 
-/* Key of thread specific flow workspace data. */
-static pthread_key_t key_workspace;
-
-/* Thread specific flow workspace data once initialization data. */
-static pthread_once_t key_workspace_init;
 
 
 /**
@@ -5722,7 +5717,7 @@ mlx5_flow_start_default(struct rte_eth_dev *dev)
 /**
  * Release key of thread specific flow workspace data.
  */
-static void
+void
 flow_release_workspace(void *data)
 {
        struct mlx5_flow_workspace *wks = data;
@@ -5736,16 +5731,6 @@ flow_release_workspace(void *data)
        }
 }
 
-/**
- * Initialize key of thread specific flow workspace data.
- */
-static void
-flow_alloc_workspace(void)
-{
-       if (pthread_key_create(&key_workspace, flow_release_workspace))
-               DRV_LOG(ERR, "Can't create flow workspace data thread key.");
-}
-
 /**
  * Get thread specific current flow workspace.
  *
@@ -5756,7 +5741,7 @@ mlx5_flow_get_thread_workspace(void)
 {
        struct mlx5_flow_workspace *data;
 
-       data = pthread_getspecific(key_workspace);
+       data = mlx5_flow_os_get_specific_workspace();
        MLX5_ASSERT(data && data->inuse);
        if (!data || !data->inuse)
                DRV_LOG(ERR, "flow workspace not initialized.");
@@ -5804,11 +5789,7 @@ mlx5_flow_push_thread_workspace(void)
        struct mlx5_flow_workspace *curr;
        struct mlx5_flow_workspace *data;
 
-       if (pthread_once(&key_workspace_init, flow_alloc_workspace)) {
-               DRV_LOG(ERR, "Failed to init flow workspace data thread key.");
-               return NULL;
-       }
-       curr = pthread_getspecific(key_workspace);
+       curr = mlx5_flow_os_get_specific_workspace();
        if (!curr) {
                data = flow_alloc_thread_workspace();
                if (!data)
@@ -5827,7 +5808,7 @@ mlx5_flow_push_thread_workspace(void)
        data->inuse = 1;
        data->flow_idx = 0;
        /* Set as current workspace */
-       if (pthread_setspecific(key_workspace, data))
+       if (mlx5_flow_os_set_specific_workspace(data))
                DRV_LOG(ERR, "Failed to set flow workspace to thread.");
        return data;
 }
@@ -5853,7 +5834,7 @@ mlx5_flow_pop_thread_workspace(void)
        data->inuse = 0;
        if (!data->prev)
                return;
-       if (pthread_setspecific(key_workspace, data->prev))
+       if (mlx5_flow_os_set_specific_workspace(data->prev))
                DRV_LOG(ERR, "Failed to set flow workspace to thread.");
 }
 
index ee85c9d..329082a 100644 (file)
@@ -1503,4 +1503,11 @@ void flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
                                  struct mlx5_cache_entry *entry);
 struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
                                                    uint32_t age_idx);
+
+void flow_release_workspace(void *data);
+int mlx5_flow_os_init_workspace_once(void);
+void *mlx5_flow_os_get_specific_workspace(void);
+int mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data);
+void mlx5_flow_os_release_workspace(void);
+
 #endif /* RTE_PMD_MLX5_FLOW_H_ */
index acd7de6..2cc02df 100644 (file)
@@ -5,6 +5,8 @@
 #include "mlx5_flow_os.h"
 #include "mlx5_win_ext.h"
 
+#include <rte_thread.h>
+
 /**
  * Verify the @p attributes will be correctly understood by the NIC and store
  * them in the @p flow if everything is correct.
@@ -238,3 +240,179 @@ mlx5_flow_os_destroy_flow(void *drv_flow_ptr)
 {
        return mlx5_glue->devx_fs_rule_del(drv_flow_ptr);
 }
+
+struct mlx5_workspace_thread {
+       HANDLE  thread_handle;
+       struct mlx5_flow_workspace *mlx5_ws;
+       struct mlx5_workspace_thread *next;
+};
+
+/**
+ * Static linked list of per-thread entries for multi-thread support of mlx5_flow_workspace.
+ */
+static struct mlx5_workspace_thread *curr;
+static struct mlx5_workspace_thread *first;
+rte_tls_key ws_tls_index;
+static pthread_mutex_t lock_thread_list;
+
+static bool
+mlx5_is_thread_alive(HANDLE thread_handle)
+{
+       DWORD result = WaitForSingleObject(thread_handle, 0);
+
+       if (result == WAIT_OBJECT_0)
+               return false;
+       return true;
+}
+
+static int
+mlx5_get_current_thread(HANDLE *p_handle)
+{
+       BOOL ret = DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
+               GetCurrentProcess(), p_handle, 0, 0, DUPLICATE_SAME_ACCESS);
+
+       if (!ret) {
+               RTE_LOG_WIN32_ERR("DuplicateHandle()");
+               return -1;
+       }
+       return 0;
+}
+
+static void
+mlx5_clear_thread_list(void)
+{
+       struct mlx5_workspace_thread *temp = first;
+       struct mlx5_workspace_thread *next, *prev = NULL;
+       HANDLE curr_thread;
+
+       if (!temp)
+               return;
+       if (mlx5_get_current_thread(&curr_thread)) {
+               DRV_LOG(ERR, "Failed to get current thread "
+                       "handle.");
+               return;
+       }
+       while (temp) {
+               next = temp->next;
+               if (temp->thread_handle != curr_thread &&
+                   !mlx5_is_thread_alive(temp->thread_handle)) {
+                       if (temp == first) {
+                               if (curr == temp)
+                                       curr = temp->next;
+                               first = temp->next;
+                       } else if (temp == curr) {
+                               curr = prev;
+                       }
+                       flow_release_workspace(temp->mlx5_ws);
+                       CloseHandle(temp->thread_handle);
+                       free(temp);
+                       if (prev)
+                               prev->next = next;
+                       temp = next;
+                       continue;
+               }
+               prev = temp;
+               temp = temp->next;
+       }
+       CloseHandle(curr_thread);
+}
+
+/**
+ * Release workspaces before exit.
+ */
+void
+mlx5_flow_os_release_workspace(void)
+{
+       mlx5_clear_thread_list();
+       if (first) {
+               MLX5_ASSERT(!first->next);
+               flow_release_workspace(first->mlx5_ws);
+               free(first);
+       }
+       rte_thread_tls_key_delete(ws_tls_index);
+       pthread_mutex_destroy(&lock_thread_list);
+}
+
+static int
+mlx5_add_workspace_to_list(struct mlx5_flow_workspace *data)
+{
+       HANDLE curr_thread;
+       struct mlx5_workspace_thread *temp = calloc(1, sizeof(*temp));
+
+       if (!temp) {
+               DRV_LOG(ERR, "Failed to allocate thread workspace "
+                       "memory.");
+               return -1;
+       }
+       if (mlx5_get_current_thread(&curr_thread)) {
+               DRV_LOG(ERR, "Failed to get current thread "
+                       "handle.");
+               free(temp);
+               return -1;
+       }
+       temp->mlx5_ws = data;
+       temp->thread_handle = curr_thread;
+       pthread_mutex_lock(&lock_thread_list);
+       mlx5_clear_thread_list();
+       if (!first) {
+               first = temp;
+               curr = temp;
+       } else {
+               curr->next = temp;
+               curr = curr->next;
+       }
+       pthread_mutex_unlock(&lock_thread_list);
+       return 0;
+}
+
+int
+mlx5_flow_os_init_workspace_once(void)
+{
+       int err = rte_thread_tls_key_create(&ws_tls_index, NULL);
+
+       if (err) {
+               DRV_LOG(ERR, "Can't create flow workspace data thread key.");
+               return err;
+       }
+       pthread_mutex_init(&lock_thread_list, NULL);
+       return 0;
+}
+
+void *
+mlx5_flow_os_get_specific_workspace(void)
+{
+       return rte_thread_tls_value_get(ws_tls_index);
+}
+
+int
+mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
+{
+       int err = 0;
+       int old_err = rte_errno;
+
+       rte_errno = 0;
+       if (!rte_thread_tls_value_get(ws_tls_index)) {
+               if (rte_errno) {
+                       DRV_LOG(ERR, "Failed checking specific workspace.");
+                       rte_errno = old_err;
+                       return -1;
+               }
+               /*
+                * Setting a specific workspace when the current value is
+                * NULL can happen only once per thread; record this thread in
+                * the linked list to be able to release its resources later on.
+                */
+               err = mlx5_add_workspace_to_list(data);
+               if (err) {
+                       DRV_LOG(ERR, "Failed adding workspace to list.");
+                       rte_errno = old_err;
+                       return -1;
+               }
+       }
+       if (rte_thread_tls_value_set(ws_tls_index, data)) {
+               DRV_LOG(ERR, "Failed setting specific workspace.");
+               err = -1;
+       }
+       rte_errno = old_err;
+       return err;
+}
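
Taken together, the Windows path works as follows: the first time a thread
stores a workspace, the thread is appended to the linked list with a duplicated
handle; terminated threads are detected with a zero-timeout WaitForSingleObject()
and their workspaces are freed lazily, either when another thread registers its
workspace or when the last device context is released. A hedged sketch of the
resulting lifetime from the common code's point of view
(mlx5_flow_push_thread_workspace() and mlx5_flow_pop_thread_workspace() are the
existing entry points in mlx5_flow.c; the worker below is hypothetical):

    /* Hypothetical per-thread user of the flow workspace. */
    static void
    flow_worker(struct rte_eth_dev *dev)
    {
    	struct mlx5_flow_workspace *wks;

    	wks = mlx5_flow_push_thread_workspace();
    	if (!wks)
    		return;
    	/* ... create/translate flows for dev using wks ... */
    	RTE_SET_USED(dev);
    	mlx5_flow_pop_thread_workspace();
    	/*
    	 * The workspace itself is not freed here: on Linux the TLS
    	 * destructor frees it at thread exit, on Windows it is freed
    	 * from mlx5_clear_thread_list() once the thread has terminated.
    	 */
    }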