common/mlx5: add memory reclaim glue function
authorSuanming Mou <suanmingm@mellanox.com>
Mon, 1 Jun 2020 06:09:42 +0000 (14:09 +0800)
committerFerruh Yigit <ferruh.yigit@intel.com>
Wed, 3 Jun 2020 15:19:26 +0000 (17:19 +0200)
When a flow is destroyed, rdma-core may keep some resources cached so
that the flow can be recreated more efficiently. At peak times, when
millions of flows are created and destroyed, the amount of cached
resources can become huge.

rdma-core now provides a new function to configure flow resources not
to be cached. Add a memory reclaim glue function so that excessive
caching can be avoided.

This is the first patch of the memory reclaim series. A new devarg will
be added to the PMD so that the reclaim behavior can be configured.
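For illustration, a minimal sketch of how the PMD side could consume the
new glue entry point; the wrapper name and call site below are
hypothetical and not part of this patch:

    #include "mlx5_glue.h"

    /* Request (or disable) device memory reclaim on a DR domain. */
    static void
    mlx5_reclaim_domain_memory(void *dr_domain, uint32_t enable)
    {
            /* The glue call falls back to a no-op when rdma-core
             * lacks mlx5dv_dr_domain_set_reclaim_device_memory().
             */
            mlx5_glue->dr_reclaim_domain_memory(dr_domain, enable);
    }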

Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
drivers/common/mlx5/Makefile
drivers/common/mlx5/linux/meson.build
drivers/common/mlx5/linux/mlx5_glue.c
drivers/common/mlx5/linux/mlx5_glue.h

index e9863b5..4948fef 100644 (file)
@@ -195,6 +195,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
                infiniband/mlx5dv.h \
                func mlx5dv_alloc_var \
                $(AUTOCONF_OUTPUT)
+       $Q sh -- '$<' '$@' \
+               HAVE_MLX5DV_DR_MEM_RECLAIM \
+               infiniband/mlx5dv.h \
+               func mlx5dv_dr_domain_set_reclaim_device_memory \
+               $(AUTOCONF_OUTPUT)
        $Q sh -- '$<' '$@' \
                HAVE_ETHTOOL_LINK_MODE_25G \
                /usr/include/linux/ethtool.h \
index f0016fa..ce065d9 100644 (file)
@@ -159,6 +159,8 @@ has_sym_args = [
        'RDMA_NLDEV_ATTR_NDEV_INDEX' ],
        [ 'HAVE_MLX5_DR_FLOW_DUMP', 'infiniband/mlx5dv.h',
        'mlx5dv_dump_dr_domain'],
+       [ 'HAVE_MLX5DV_DR_MEM_RECLAIM', 'infiniband/mlx5dv.h',
+       'mlx5dv_dr_domain_set_reclaim_device_memory'],
        [ 'HAVE_DEVLINK', 'linux/devlink.h', 'DEVLINK_GENL_NAME' ],
 ]
 config = configuration_data()
index d48660b..c91ee33 100644 (file)
@@ -1182,6 +1182,18 @@ mlx5_glue_dv_free_var(struct mlx5dv_var *var)
 #endif
 }
 
+
+static void
+mlx5_glue_dr_reclaim_domain_memory(void *domain, uint32_t enable)
+{
+#ifdef HAVE_MLX5DV_DR_MEM_RECLAIM
+       mlx5dv_dr_domain_set_reclaim_device_memory(domain, enable);
+#else
+       (void)(enable);
+       (void)(domain);
+#endif
+}
+
 __rte_cache_aligned
 const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue) {
        .version = MLX5_GLUE_VERSION,
@@ -1281,6 +1293,7 @@ const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue) {
        .devx_qp_query = mlx5_glue_devx_qp_query,
        .devx_port_query = mlx5_glue_devx_port_query,
        .dr_dump_domain = mlx5_glue_dr_dump_domain,
+       .dr_reclaim_domain_memory = mlx5_glue_dr_reclaim_domain_memory,
        .devx_query_eqn = mlx5_glue_devx_query_eqn,
        .devx_create_event_channel = mlx5_glue_devx_create_event_channel,
        .devx_destroy_event_channel = mlx5_glue_devx_destroy_event_channel,
index 81d6a22..5d238a4 100644 (file)
@@ -302,6 +302,7 @@ struct mlx5_glue {
                        (struct mlx5dv_devx_event_channel *event_channel,
                         struct mlx5dv_devx_async_event_hdr *event_data,
                         size_t event_resp_len);
+       void (*dr_reclaim_domain_memory)(void *domain, uint32_t enable);
 };
 
 extern const struct mlx5_glue *mlx5_glue;