/* Device parameter to configure whether decap is used or not. */
#define MLX5_DECAP_EN "decap_en"
+/* Device parameter to allow or prevent duplicate flow rule patterns. */
+#define MLX5_ALLOW_DUPLICATE_PATTERN "allow_duplicate_pattern"
+
/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;
.grow_trunk = 3,
.grow_shift = 2,
.need_lock = 1,
- .release_mem_en = 1,
+ .release_mem_en = 0,
+ .per_core_cache = (1 << 16),
.malloc = mlx5_malloc,
.free = mlx5_free,
.type = "mlx5_tag_ipool",
.grow_trunk = 3,
.grow_shift = 2,
.need_lock = 1,
- .release_mem_en = 1,
+ .release_mem_en = 0,
+ .per_core_cache = 1 << 19,
.malloc = mlx5_malloc,
.free = mlx5_free,
.type = "mlx5_flow_handle_ipool",
#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16
-#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096
+#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 1024
/**
 * Decide whether representor ID is an HPF (host PF) port on BF2.
if (sh->meter_aso_en) {
rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl);
LIST_INIT(&sh->mtrmng->pools_mng.meters);
- sh->mtrmng->policy_idx_tbl =
- mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
}
sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
}
MLX5_FLOW_HANDLE_VERBS_SIZE;
break;
}
- if (config->reclaim_mode)
+ if (config->reclaim_mode) {
cfg.release_mem_en = 1;
+ cfg.per_core_cache = 0;
+ } else {
+ cfg.release_mem_en = 0;
+ }
sh->ipool[i] = mlx5_ipool_create(&cfg);
}
}
+
/**
* Release the flow resources' indexed mempool.
*
for (i = 0; i < MLX5_IPOOL_MAX; ++i)
mlx5_ipool_destroy(sh->ipool[i]);
+ for (i = 0; i < MLX5_MAX_MODIFY_NUM; ++i)
+ if (sh->mdh_ipools[i])
+ mlx5_ipool_destroy(sh->mdh_ipools[i]);
}
/*
}
sh->refcnt = 1;
sh->max_port = spawn->max_port;
+ sh->reclaim_mode = config->reclaim_mode;
strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx),
sizeof(sh->ibdev_name) - 1);
strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->ctx),
/* Tables are only used in DV and DR modes. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
struct mlx5_dev_ctx_shared *sh = priv->sh;
- char s[MLX5_HLIST_NAMESIZE];
+ char s[MLX5_NAME_SIZE];
MLX5_ASSERT(sh);
snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
- 0, 0, flow_dv_tbl_create_cb,
+ false, true, sh,
+ flow_dv_tbl_create_cb,
flow_dv_tbl_match_cb,
- flow_dv_tbl_remove_cb);
+ flow_dv_tbl_remove_cb,
+ flow_dv_tbl_clone_cb,
+ flow_dv_tbl_clone_free_cb);
if (!sh->flow_tbls) {
DRV_LOG(ERR, "flow tables with hash creation failed.");
err = ENOMEM;
return err;
}
- sh->flow_tbls->ctx = sh;
#ifndef HAVE_MLX5DV_DR
struct rte_flow_error error;
struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id];
* If all the flows are already flushed in the device stop stage,
* then this will return directly without any action.
*/
- mlx5_flow_list_flush(dev, &priv->flows, true);
+ mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
mlx5_action_handle_flush(dev);
mlx5_flow_meter_flush(dev, NULL);
/* Prevent crashes when queues are still in use. */
if (ret)
DRV_LOG(WARNING, "port %u some flows still remain",
dev->data->port_id);
- mlx5_cache_list_destroy(&priv->hrxqs);
+ if (priv->hrxqs)
+ mlx5_list_destroy(priv->hrxqs);
/*
* Free the shared context in last turn, because the cleanup
* routines above may use some shared fields, like
config->sys_mem_en = !!tmp;
} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
config->decap_en = !!tmp;
+ } else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
+ config->allow_duplicate_pattern = !!tmp;
} else {
DRV_LOG(WARNING, "%s: unknown parameter", key);
rte_errno = EINVAL;
MLX5_RECLAIM_MEM,
MLX5_SYS_MEM_EN,
MLX5_DECAP_EN,
+ MLX5_ALLOW_DUPLICATE_PATTERN,
NULL,
};
struct rte_kvargs *kvlist;