net/mlx5: convert flow dev handle to indexed
Changing the mlx5 flow handle from a pointer to a uint32_t index saves memory
per flow. With a million flows, this saves several MBytes of memory.
Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
/* Mutex serializing access to the shared IB device list. */
static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
 * Configuration of each indexed memory pool. Entries are looked up by
 * enum mlx5_ipool_index, so the order here must match that enum.
 * NOTE(review): some table entries appear elided from this view — the
 * first visible entry pairs the encap/decap resource size with the
 * "mlx5_jump_ipool" name; verify ordering against the full file.
 */
static struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
{
.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
.trunk_size = 64,
.free = rte_free,
.type = "mlx5_jump_ipool",
},
+#endif
{
.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
.trunk_size = 64,
.free = rte_free,
.type = "mlx5_hrxq_ipool",
},
+ /* New pool: mlx5 flow handles are now allocated by index. */
+ {
+ .size = sizeof(struct mlx5_flow_handle),
+ .trunk_size = 64,
+ .grow_trunk = 3,
+ .grow_shift = 2,
+ .need_lock = 0,
+ .release_mem_en = 1,
+ .malloc = rte_malloc_socket,
+ .free = rte_free,
+ .type = "mlx5_flow_handle_ipool",
+ },
};
*
* @param[in] sh
* Pointer to mlx5_ibv_shared object.
+ * @param[in] config
+ * Pointer to user dev config.
*/
static void
-mlx5_flow_ipool_create(struct mlx5_ibv_shared *sh)
+mlx5_flow_ipool_create(struct mlx5_ibv_shared *sh,
+ const struct mlx5_dev_config *config __rte_unused)
{
uint8_t i;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ /*
+ * Even when DV support is compiled in, the user may select the
+ * verbs mode at runtime; the verbs flow handle needs only
+ * MLX5_FLOW_HANDLE_VERBS_SIZE bytes, so shrink the pool element
+ * size accordingly before the pools are created.
+ */
+ if (!config->dv_flow_en)
+ mlx5_ipool_cfg[MLX5_IPOOL_MLX5_FLOW].size =
+ MLX5_FLOW_HANDLE_VERBS_SIZE;
+#endif
/* Create one indexed pool per mlx5_ipool_index entry. */
for (i = 0; i < MLX5_IPOOL_MAX; ++i)
sh->ipool[i] = mlx5_ipool_create(&mlx5_ipool_cfg[i]);
}
goto error;
}
mlx5_flow_counters_mng_init(sh);
- mlx5_flow_ipool_create(sh);
+ mlx5_flow_ipool_create(sh, config);
/* Add device to memory callback list. */
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
/*
 * Indexes into mlx5_ibv_shared.ipool[]; must stay in sync with the
 * mlx5_ipool_cfg[] initializer order.
 */
enum mlx5_ipool_index {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
MLX5_IPOOL_PUSH_VLAN, /* Pool for push vlan resource. */
MLX5_IPOOL_TAG, /* Pool for tag resource. */
MLX5_IPOOL_PORT_ID, /* Pool for port id resource. */
MLX5_IPOOL_JUMP, /* Pool for jump resource. */
+#endif
MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */
+ MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
MLX5_IPOOL_MAX, /* Number of pools (size of ipool[]). */
};
/*
 * Apply flow_drv_rxq_flags_set() to every device flow handle of @flow.
 * Handles are now chained by pool index (flow->dev_handles is the head
 * index), so the walk goes through the MLX5_IPOOL_MLX5_FLOW pool
 * instead of a pointer-linked list.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dev_handle, next)
flow_drv_rxq_flags_set(dev, flow, dev_handle);
}
/*
 * Apply flow_drv_rxq_flags_trim() to every device flow handle of @flow.
 * Mirrors flow_rxq_flags_set(): iterates the handles through the
 * MLX5_IPOOL_MLX5_FLOW indexed pool rather than a pointer list.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dev_handle, next)
flow_drv_rxq_flags_trim(dev, flow, dev_handle);
}
flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
struct rte_flow *flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
struct mlx5_flow_handle *dev_handle;
+ /* Free the qrss flow id held by each indexed device flow handle. */
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dev_handle, next)
if (dev_handle->qrss_id)
flow_qrss_free_id(dev, dev_handle->qrss_id);
}
dev_flow->flow = flow;
dev_flow->external = external;
/* Subflow object was created, we must include one in the list. */
- LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
+ SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+ dev_flow->handle, next);
/*
* If dev_flow is as one of the suffix flow, some actions in suffix
* flow may need some user defined item layer flags.
/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
}
- LIST_INIT(&flow->dev_handles);
+ flow->dev_handles = 0;
if (rss && rss->types) {
unsigned int graph_root;
goto error;
dev_flow->flow = flow;
dev_flow->external = 0;
- LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
+ SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+ dev_flow->handle, next);
ret = flow_drv_translate(dev, dev_flow, &attr_tx,
items_tx.items,
actions_hairpin_tx.actions, error);
/** Device flow handle structure: used both for creating & destroying. */
struct mlx5_flow_handle {
- LIST_ENTRY(mlx5_flow_handle) next;
- /**< Pointer to next device flow handle. */
+ SILIST_ENTRY(uint32_t)next;
+ /**< Index to next device flow handle. */
uint64_t layers;
/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
uint64_t act_flags;
struct mlx5_flow_verbs_workspace verbs;
};
struct mlx5_flow_handle *handle;
+ uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
};
/* Flow meter state. */
struct mlx5_flow_mreg_copy_resource *mreg_copy;
/**< pointer to metadata register copy table resource. */
struct mlx5_flow_meter *meter; /**< Holds flow meter. */
- LIST_HEAD(dev_handles, mlx5_flow_handle) dev_handles;
+ uint32_t dev_handles;
/**< Device flow handles that are part of the flow. */
struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
const struct rte_flow_action actions[] __rte_unused,
struct rte_flow_error *error)
{
- size_t size = sizeof(struct mlx5_flow_handle);
+ uint32_t handle_idx = 0;
struct mlx5_flow *dev_flow;
struct mlx5_flow_handle *dev_handle;
struct mlx5_priv *priv = dev->data->dev_private;
"not free temporary device flow");
return NULL;
}
- dev_handle = rte_calloc(__func__, 1, size, 0);
+ dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ &handle_idx);
if (!dev_handle) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
/* No multi-thread supporting. */
dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
dev_flow->handle = dev_handle;
+ dev_flow->handle_idx = handle_idx;
dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
/*
* The matching value needs to be cleared to 0 before using. In the
struct mlx5_flow_handle_dv *dv_h;
struct mlx5_flow *dev_flow;
struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
int n;
int err;
int idx;
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(dh, &flow->dev_handles, next) {
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dh, next) {
if (dh->hrxq) {
if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct mlx5_flow_handle *dh;
+ uint32_t handle_idx;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (!flow)
return;
- LIST_FOREACH(dh, &flow->dev_handles, next) {
+ /*
+ * Walk the indexed handle list manually: the next index is read
+ * from the current handle before advancing. A lookup miss
+ * (mlx5_ipool_get() returning NULL) ends the walk early.
+ */
+ handle_idx = flow->dev_handles;
+ while (handle_idx) {
+ dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ handle_idx);
+ if (!dh)
+ return;
if (dh->ib_flow) {
claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
dh->ib_flow = NULL;
}
if (dh->vf_vlan.tag && dh->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+ handle_idx = dh->next.next;
}
}
__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct mlx5_flow_handle *dev_handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (!flow)
return;
mlx5_flow_meter_detach(flow->meter);
flow->meter = NULL;
/* NOTE(review): enclosing if (flow->meter) appears elided from this view. */
}
- while (!LIST_EMPTY(&flow->dev_handles)) {
- dev_handle = LIST_FIRST(&flow->dev_handles);
- LIST_REMOVE(dev_handle, next);
+ /*
+ * Pop handles one at a time from the indexed list: the head index
+ * is advanced before the handle is returned to the pool, since
+ * the handle memory is invalid after mlx5_ipool_free().
+ */
+ while (flow->dev_handles) {
+ uint32_t tmp_idx = flow->dev_handles;
+
+ dev_handle = mlx5_ipool_get(priv->sh->ipool
+ [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
+ if (!dev_handle)
+ return;
+ flow->dev_handles = dev_handle->next.next;
if (dev_handle->dvh.matcher)
flow_dv_matcher_release(dev, dev_handle);
if (dev_handle->dvh.encap_decap)
if (dev_handle->dvh.tag_resource)
flow_dv_tag_release(dev,
dev_handle->dvh.tag_resource);
- rte_free(dev_handle);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ tmp_idx);
}
}
struct rte_flow_error *error)
{
size_t size = 0;
+ uint32_t handle_idx = 0;
struct mlx5_flow *dev_flow;
struct mlx5_flow_handle *dev_handle;
struct mlx5_priv *priv = dev->data->dev_private;
"not free temporary device flow");
return NULL;
}
- dev_handle = rte_calloc(__func__, 1, MLX5_FLOW_HANDLE_VERBS_SIZE, 0);
+ dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ &handle_idx);
if (!dev_handle) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
/* No multi-thread supporting. */
dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
dev_flow->handle = dev_handle;
+ dev_flow->handle_idx = handle_idx;
/* Memcpy is used, only size needs to be cleared to 0. */
dev_flow->verbs.size = 0;
dev_flow->verbs.attr.num_of_specs = 0;
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *handle;
+ uint32_t handle_idx;
if (!flow)
return;
- LIST_FOREACH(handle, &flow->dev_handles, next) {
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, handle, next) {
if (handle->ib_flow) {
claim_zero(mlx5_glue->destroy_flow(handle->ib_flow));
handle->ib_flow = NULL;
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *handle;
if (!flow)
return;
flow_verbs_remove(dev, flow);
- while (!LIST_EMPTY(&flow->dev_handles)) {
- handle = LIST_FIRST(&flow->dev_handles);
- LIST_REMOVE(handle, next);
- rte_free(handle);
+ while (flow->dev_handles) {
+ uint32_t tmp_idx = flow->dev_handles;
+
+ handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ tmp_idx);
+ if (!handle)
+ return;
+ flow->dev_handles = handle->next.next;
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ tmp_idx);
}
if (flow->counter) {
flow_verbs_counter_release(dev, flow->counter);
struct mlx5_flow_handle *handle;
struct mlx5_flow *dev_flow;
struct mlx5_hrxq *hrxq;
+ uint32_t dev_handles;
int err;
int idx;
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(handle, &flow->dev_handles, next) {
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ dev_handles, handle, next) {
if (handle->hrxq) {
if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);