static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
+ {
+ .size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
+ .trunk_size = 64,
+ .grow_trunk = 3,
+ .grow_shift = 2,
+ .need_lock = 0,
+ .release_mem_en = 1,
+ .malloc = rte_malloc_socket,
+ .free = rte_free,
+ .type = "mlx5_encap_decap_ipool",
+ },
+};
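Note: entries in mlx5_ipool_cfg[] are matched by array position to the mlx5_ipool_index enum added in mlx5.h further below, so a new resource type must extend both in lockstep. A minimal sketch of such an extension follows; the MLX5_IPOOL_PUSH_VLAN index, the pool name string, and the choice of config values are hypothetical, not part of this patch:

```c
/* Hypothetical entry appended to mlx5_ipool_cfg[]. */
{
	.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
	.trunk_size = 64,
	.grow_trunk = 3,
	.grow_shift = 2,
	.need_lock = 0,
	.release_mem_en = 1,
	.malloc = rte_malloc_socket,
	.free = rte_free,
	.type = "mlx5_push_vlan_ipool", /* hypothetical pool name */
},

/* Matching member, inserted before MLX5_IPOOL_MAX in mlx5.h. */
enum mlx5_ipool_index {
	MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resources. */
	MLX5_IPOOL_PUSH_VLAN,       /* Hypothetical new pool index. */
	MLX5_IPOOL_MAX,
};
```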
+
#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16
memset(&sh->cmng, 0, sizeof(sh->cmng));
}
+/**
+ * Initialize the flow resources' indexed memory pools.
+ *
+ * @param[in] sh
+ * Pointer to mlx5_ibv_shared object.
+ */
+static void
+mlx5_flow_ipool_create(struct mlx5_ibv_shared *sh)
+{
+ uint8_t i;
+
+ for (i = 0; i < MLX5_IPOOL_MAX; ++i)
+ sh->ipool[i] = mlx5_ipool_create(&mlx5_ipool_cfg[i]);
+}
+
+/**
+ * Release the flow resources' indexed memory pools.
+ *
+ * @param[in] sh
+ * Pointer to mlx5_ibv_shared object.
+ */
+static void
+mlx5_flow_ipool_destroy(struct mlx5_ibv_shared *sh)
+{
+ uint8_t i;
+
+ for (i = 0; i < MLX5_IPOOL_MAX; ++i)
+ mlx5_ipool_destroy(sh->ipool[i]);
+}
+
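For reference, the allocate/lookup/free cycle these pools provide, as exercised by the encap/decap hunks below. This is a minimal sketch assuming index 0 is the "no entry" sentinel, which the release path below relies on through the mlx5_ipool_get() NULL check:

```c
/* Sketch: life cycle of one indexed pool entry (error handling trimmed). */
uint32_t idx = 0;
struct mlx5_flow_dv_encap_decap_resource *res;

/* Allocate a zeroed entry; its index is returned through &idx. */
res = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
if (!res)
	return -ENOMEM; /* in a function returning negative errno */
/* Any holder of the 4-byte idx can recover the pointer later. */
res = mlx5_ipool_get(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
/* Return the entry to the pool once the last reference is dropped. */
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
```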
/**
* Extract pdn of PD object using DV API.
*
goto error;
}
mlx5_flow_counters_mng_init(sh);
+ mlx5_flow_ipool_create(sh);
/* Add device to memory callback list. */
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
* Only primary process handles async device events.
**/
mlx5_flow_counters_mng_close(sh);
+ mlx5_flow_ipool_destroy(sh);
MLX5_ASSERT(!sh->intr_cnt);
if (sh->intr_cnt)
mlx5_intr_callback_unregister
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
+
+enum mlx5_ipool_index {
+ MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
+ MLX5_IPOOL_MAX,
+};
+
/** Key string for IPC. */
#define MLX5_MP_NAME "net_mlx5_mp"
/* Direct Rules tables for FDB, NIC TX+RX */
void *esw_drop_action; /* Pointer to DR E-Switch drop action. */
void *pop_vlan_action; /* Pointer to DR pop VLAN action. */
- LIST_HEAD(encap_decap, mlx5_flow_dv_encap_decap_resource) encaps_decaps;
+ uint32_t encaps_decaps; /* Head index of the encap/decap resource list. */
LIST_HEAD(modify_cmd, mlx5_flow_dv_modify_hdr_resource) modify_cmds;
struct mlx5_hlist *tag_table;
LIST_HEAD(port_id_action_list, mlx5_flow_dv_port_id_action_resource)
LIST_HEAD(push_vlan_action_list, mlx5_flow_dv_push_vlan_action_resource)
push_vlan_action_list; /* List of push VLAN actions. */
struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
+ struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
+ /* Indexed memory pools for mlx5 flow resources. */
/* Shared interrupt handler section. */
pthread_mutex_t intr_mutex; /* Interrupt config mutex. */
uint32_t intr_cnt; /* Interrupt handler reference counter. */
struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
+ uint32_t idx = 0;
resource->flags = dev_flow->dv.group ? 0 : 1;
if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
else
domain = sh->tx_domain;
/* Lookup a matching resource from cache. */
- LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
+ ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], sh->encaps_decaps, idx,
+ cache_resource, next) {
if (resource->reformat_type == cache_resource->reformat_type &&
resource->ft_type == cache_resource->ft_type &&
resource->flags == cache_resource->flags &&
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->dvh.encap_decap = cache_resource;
+ dev_flow->handle->dvh.encap_decap = idx;
+ dev_flow->dv.encap_decap = cache_resource;
return 0;
}
}
/* Register new encap/decap resource. */
- cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+ &dev_flow->handle->dvh.encap_decap);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
- LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
- dev_flow->handle->dvh.encap_decap = cache_resource;
+ ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
+ dev_flow->handle->dvh.encap_decap, cache_resource, next);
+ dev_flow->dv.encap_decap = cache_resource;
DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
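The ILIST_* helpers used above link cache entries by pool index rather than by pointer; their definitions live in mlx5_utils.h and are not shown in this patch. Roughly, the lookup loop can be pictured as the sketch below, where next_idx is an illustrative name for the embedded index link that the real macros manage:

```c
/* Illustrative expansion of the index-linked lookup; the actual
 * ILIST_FOREACH/ILIST_INSERT macros in mlx5_utils.h may differ. */
uint32_t idx = sh->encaps_decaps; /* head index, 0 means empty list */
struct mlx5_flow_dv_encap_decap_resource *e;

for (; idx; idx = e->next_idx /* illustrative link field */) {
	e = mlx5_ipool_get(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
	if (!e)
		break;
	/* ...compare *e with the requested resource, as above... */
}
```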
const struct rte_flow_action_raw_encap *encap_data;
struct mlx5_flow_dv_encap_decap_resource res;
+ memset(&res, 0, sizeof(res));
encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
res.size = encap_data->size;
memcpy(res.buf, encap_data->data, res.size);
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- handle->dvh.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->verbs_action;
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- handle->dvh.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->verbs_action;
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
(dev, actions, dev_flow, attr, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- handle->dvh.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->verbs_action;
} else {
/* Handle encap without preceding decap. */
if (flow_dv_create_action_l2_encap
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- handle->dvh.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->verbs_action;
}
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
(dev, dev_flow, attr->transfer, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- handle->dvh.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->verbs_action;
}
/* If decap is followed by encap, handle it at encap. */
action_flags |= MLX5_FLOW_ACTION_DECAP;
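The recurring change in the hunks above is one split: the long-lived mlx5_flow_handle now stores only the 4-byte pool index of the encap/decap resource, while the transient dev_flow caches the raw pointer so the action-translation path does not pay an extra mlx5_ipool_get() per use. A sketch of the two fields involved; the real structures live in mlx5_flow.h and carry many more members:

```c
/* Sketch only, assuming layouts not shown in this patch. */
struct mlx5_flow_handle {
	struct {
		uint32_t encap_decap; /* Pool index, kept until release. */
	} dvh;
	/* ... */
};

struct mlx5_flow {
	struct {
		/* Cached pointer, valid only while the flow is built. */
		struct mlx5_flow_dv_encap_decap_resource *encap_decap;
	} dv;
	/* ... */
};
```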
/**
* Release an encap/decap resource.
*
+ * @param dev
+ * Pointer to Ethernet device.
* @param handle
* Pointer to mlx5_flow_handle.
*
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_encap_decap_resource_release(struct mlx5_flow_handle *handle)
+flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
{
- struct mlx5_flow_dv_encap_decap_resource *cache_resource =
- handle->dvh.encap_decap;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t idx = handle->dvh.encap_decap;
+ struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+ cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+ idx);
+ if (!cache_resource)
+ return 0;
MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
(void *)cache_resource,
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
claim_zero(mlx5_glue->destroy_flow_action
(cache_resource->verbs_action));
- LIST_REMOVE(cache_resource, next);
- rte_free(cache_resource);
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+ &priv->sh->encaps_decaps, idx,
+ cache_resource, next);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
DRV_LOG(DEBUG, "encap/decap resource %p: removed",
(void *)cache_resource);
return 0;
if (dev_handle->dvh.matcher)
flow_dv_matcher_release(dev, dev_handle);
if (dev_handle->dvh.encap_decap)
- flow_dv_encap_decap_resource_release(dev_handle);
+ flow_dv_encap_decap_resource_release(dev, dev_handle);
if (dev_handle->dvh.modify_hdr)
flow_dv_modify_hdr_resource_release(dev_handle);
if (dev_handle->dvh.jump)