#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_mp.h>
+#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_encap_decap_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_push_vlan_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_tag_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_port_id_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_jump_ipool",
},
#endif
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_meter_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_mcp_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_hrxq_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_flow_handle_ipool",
},
{
.trunk_size = 4096,
.need_lock = 1,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "rte_flow_ipool",
},
};
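All of the ipool configurations above make the same substitution: the rte_malloc_socket/rte_free hooks become mlx5_malloc/mlx5_free, whose hook type (flags, size, align, socket) is updated in the mlx5_utils.h hunk near the end of this patch. Below is a minimal sketch of one full entry under the new contract; the .size value and the names are illustrative, not taken from this patch:

	/* Hypothetical ipool configuration using the unified allocator. */
	static struct mlx5_indexed_pool_config example_cfg = {
		.size = sizeof(uint64_t), /* illustrative entry size */
		.trunk_size = 64,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		/* Hook shape: (uint32_t flags, size_t size, align, socket). */
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "example_ipool",
	};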
struct mlx5_flow_id_pool *pool;
void *mem;
- pool = rte_zmalloc("id pool allocation", sizeof(*pool),
- RTE_CACHE_LINE_SIZE);
+ pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (!pool) {
DRV_LOG(ERR, "can't allocate id pool");
rte_errno = ENOMEM;
return NULL;
}
- mem = rte_zmalloc("", MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
+ mem = mlx5_malloc(MLX5_MEM_ZERO,
+ MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (!mem) {
DRV_LOG(ERR, "can't allocate mem for id pool");
rte_errno = ENOMEM;
pool->max_id = max_id;
return pool;
error:
- rte_free(pool);
+ mlx5_free(pool);
return NULL;
}
void
mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool)
{
- rte_free(pool->free_arr);
- rte_free(pool);
+ mlx5_free(pool->free_arr);
+ mlx5_free(pool);
}
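The pairing rule behind every hunk in this patch: a buffer obtained from mlx5_malloc() must be returned through mlx5_free(), which (assuming the companion mlx5_malloc implementation dispatches on the address) picks the system or rte heap itself; handing such a buffer to rte_free() breaks once the allocation is served from system memory. A hedged round-trip sketch, with example_alloc_ids() and count as illustrative names:

	/* Allocate a zeroed, cache-aligned array; the caller releases it
	 * with mlx5_free(), which is assumed to be a no-op on NULL like
	 * rte_free().
	 */
	static uint32_t *
	example_alloc_ids(uint32_t count)
	{
		uint32_t *arr = mlx5_malloc(MLX5_MEM_ZERO,
					    count * sizeof(uint32_t),
					    RTE_CACHE_LINE_SIZE,
					    SOCKET_ID_ANY);

		if (arr == NULL)
			rte_errno = ENOMEM;
		return arr;
	}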
/**
size = pool->curr - pool->free_arr;
size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
MLX5_ASSERT(size2 > size);
- mem = rte_malloc("", size2 * sizeof(uint32_t), 0);
+ mem = mlx5_malloc(0, size2 * sizeof(uint32_t), 0,
+ SOCKET_ID_ANY);
if (!mem) {
DRV_LOG(ERR, "can't allocate mem for id pool");
rte_errno = ENOMEM;
return -rte_errno;
}
memcpy(mem, pool->free_arr, size * sizeof(uint32_t));
- rte_free(pool->free_arr);
+ mlx5_free(pool->free_arr);
pool->free_arr = mem;
pool->curr = pool->free_arr + size;
pool->last = pool->free_arr + size2;
LIST_REMOVE(mng, next);
claim_zero(mlx5_devx_cmd_destroy(mng->dm));
claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
- rte_free(mem);
+ mlx5_free(mem);
}
/**
(pool, j)->dcs));
}
TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, next);
- rte_free(pool);
+ mlx5_free(pool);
pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
}
- rte_free(sh->cmng.ccont[i].pools);
+ mlx5_free(sh->cmng.ccont[i].pools);
}
mng = LIST_FIRST(&sh->cmng.mem_mngs);
while (mng) {
entry);
MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
- rte_free(tbl_data);
+ mlx5_free(tbl_data);
}
table_key.direction = 1;
pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
entry);
MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
- rte_free(tbl_data);
+ mlx5_free(tbl_data);
}
table_key.direction = 0;
table_key.domain = 1;
entry);
MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
- rte_free(tbl_data);
+ mlx5_free(tbl_data);
}
mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
}
.direction = 0,
}
};
- struct mlx5_flow_tbl_data_entry *tbl_data = rte_zmalloc(NULL,
- sizeof(*tbl_data), 0);
+ struct mlx5_flow_tbl_data_entry *tbl_data = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*tbl_data), 0,
+ SOCKET_ID_ANY);
if (!tbl_data) {
err = ENOMEM;
rte_atomic32_init(&tbl_data->tbl.refcnt);
rte_atomic32_inc(&tbl_data->tbl.refcnt);
table_key.direction = 1;
- tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+ tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
+ SOCKET_ID_ANY);
if (!tbl_data) {
err = ENOMEM;
goto error;
rte_atomic32_inc(&tbl_data->tbl.refcnt);
table_key.direction = 0;
table_key.domain = 1;
- tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+ tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
+ SOCKET_ID_ANY);
if (!tbl_data) {
err = ENOMEM;
goto error;
mlx5_mprq_free_mp(dev);
mlx5_os_free_shared_dr(priv);
if (priv->rss_conf.rss_key != NULL)
- rte_free(priv->rss_conf.rss_key);
+ mlx5_free(priv->rss_conf.rss_key);
if (priv->reta_idx != NULL)
- rte_free(priv->reta_idx);
+ mlx5_free(priv->reta_idx);
if (priv->config.vf)
mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
dev->data->mac_addrs,
#include <rte_rwlock.h>
#include <rte_cycles.h>
+#include <mlx5_malloc.h>
+
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
return -rte_errno;
}
priv->rss_conf.rss_key =
- rte_realloc(priv->rss_conf.rss_key,
- MLX5_RSS_HASH_KEY_LEN, 0);
+ mlx5_realloc(priv->rss_conf.rss_key, MLX5_MEM_RTE,
+ MLX5_RSS_HASH_KEY_LEN, 0, SOCKET_ID_ANY);
if (!priv->rss_conf.rss_key) {
DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
dev->data->port_id, rxqs_n);
if (priv->skip_default_rss_reta)
return ret;
- rss_queue_arr = rte_malloc("", rxqs_n * sizeof(unsigned int), 0);
+ rss_queue_arr = mlx5_malloc(0, rxqs_n * sizeof(unsigned int), 0,
+ SOCKET_ID_ANY);
if (!rss_queue_arr) {
DRV_LOG(ERR, "port %u cannot allocate RSS queue list (%u)",
dev->data->port_id, rxqs_n);
DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
dev->data->port_id, rss_queue_n);
rte_errno = EINVAL;
- rte_free(rss_queue_arr);
+ mlx5_free(rss_queue_arr);
return -rte_errno;
}
DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
rss_queue_n));
ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
if (ret) {
- rte_free(rss_queue_arr);
+ mlx5_free(rss_queue_arr);
return ret;
}
/*
if (++j == rss_queue_n)
j = 0;
}
- rte_free(rss_queue_arr);
+ mlx5_free(rss_queue_arr);
return ret;
}
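Both RSS hunks pass MLX5_MEM_RTE to mlx5_realloc(), which I read as keeping the buffer on the rte heap the original rss_key and reta_idx allocations came from (a realloc presumably cannot migrate a buffer between the system and rte heaps). A hedged sketch of the grow pattern, with new_len as a placeholder:

	/* Grow an rte-heap buffer in place; on failure the old buffer is
	 * assumed intact, following the usual realloc semantics.
	 */
	uint8_t *key = mlx5_realloc(priv->rss_conf.rss_key, MLX5_MEM_RTE,
				    new_len, 0, SOCKET_ID_ANY);

	if (key == NULL)
		return -ENOMEM;
	priv->rss_conf.rss_key = key;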
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
+#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
sizeof(struct rte_flow_action_set_tag) +
sizeof(struct rte_flow_action_jump);
- ext_actions = rte_zmalloc(__func__, act_size, 0);
+ ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
+ SOCKET_ID_ANY);
if (!ext_actions)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_ACTION,
*/
act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
sizeof(struct mlx5_flow_action_copy_mreg);
- ext_actions = rte_zmalloc(__func__, act_size, 0);
+ ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
+ SOCKET_ID_ANY);
if (!ext_actions)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_ACTION,
* by flow_drv_destroy.
*/
flow_qrss_free_id(dev, qrss_id);
- rte_free(ext_actions);
+ mlx5_free(ext_actions);
return ret;
}
#define METER_SUFFIX_ITEM 4
item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
sizeof(struct mlx5_rte_flow_item_tag) * 2;
- sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
+ sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
+ 0, SOCKET_ID_ANY);
if (!sfx_actions)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_ACTION,
external, flow_idx, error);
exit:
if (sfx_actions)
- rte_free(sfx_actions);
+ mlx5_free(sfx_actions);
return ret;
}
}
if (priv_fdir_flow) {
LIST_REMOVE(priv_fdir_flow, next);
- rte_free(priv_fdir_flow->fdir);
- rte_free(priv_fdir_flow);
+ mlx5_free(priv_fdir_flow->fdir);
+ mlx5_free(priv_fdir_flow);
}
}
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
struct mlx5_priv *priv = dev->data->dev_private;
if (!priv->inter_flows) {
- priv->inter_flows = rte_calloc(__func__, 1,
+ priv->inter_flows = mlx5_malloc(MLX5_MEM_ZERO,
MLX5_NUM_MAX_DEV_FLOWS *
sizeof(struct mlx5_flow) +
(sizeof(struct mlx5_flow_rss_desc) +
- sizeof(uint16_t) * UINT16_MAX) * 2, 0);
+ sizeof(uint16_t) * UINT16_MAX) * 2, 0,
+ SOCKET_ID_ANY);
if (!priv->inter_flows) {
DRV_LOG(ERR, "can't allocate intermediate memory.");
return;
{
struct mlx5_priv *priv = dev->data->dev_private;
- rte_free(priv->inter_flows);
+ mlx5_free(priv->inter_flows);
priv->inter_flows = NULL;
}
uint32_t flow_idx;
int ret;
- fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
+ fdir_flow = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*fdir_flow), 0,
+ SOCKET_ID_ANY);
if (!fdir_flow) {
rte_errno = ENOMEM;
return -rte_errno;
rte_errno = EEXIST;
goto error;
}
- priv_fdir_flow = rte_zmalloc(__func__, sizeof(struct mlx5_fdir_flow),
- 0);
+ priv_fdir_flow = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5_fdir_flow),
+ 0, SOCKET_ID_ANY);
if (!priv_fdir_flow) {
rte_errno = ENOMEM;
goto error;
dev->data->port_id, (void *)flow);
return 0;
error:
- rte_free(priv_fdir_flow);
- rte_free(fdir_flow);
+ mlx5_free(priv_fdir_flow);
+ mlx5_free(fdir_flow);
return -rte_errno;
}
LIST_REMOVE(priv_fdir_flow, next);
flow_idx = priv_fdir_flow->rix_flow;
flow_list_destroy(dev, &priv->flows, flow_idx);
- rte_free(priv_fdir_flow->fdir);
- rte_free(priv_fdir_flow);
+ mlx5_free(priv_fdir_flow->fdir);
+ mlx5_free(priv_fdir_flow);
DRV_LOG(DEBUG, "port %u deleted FDIR flow %u",
dev->data->port_id, flow_idx);
return 0;
priv_fdir_flow = LIST_FIRST(&priv->fdir_flows);
LIST_REMOVE(priv_fdir_flow, next);
flow_list_destroy(dev, &priv->flows, priv_fdir_flow->rix_flow);
- rte_free(priv_fdir_flow->fdir);
- rte_free(priv_fdir_flow);
+ mlx5_free(priv_fdir_flow->fdir);
+ mlx5_free(priv_fdir_flow);
}
}
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
+#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
(sh->ctx, domain, cache_resource,
&cache_resource->action);
if (ret) {
- rte_free(cache_resource);
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
(priv->sh->fdb_domain, resource->port_id,
&cache_resource->action);
if (ret) {
- rte_free(cache_resource);
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
(domain, resource->vlan_tag,
&cache_resource->action);
if (ret) {
- rte_free(cache_resource);
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
}
}
/* Register new modify-header resource. */
- cache_resource = rte_calloc(__func__, 1,
- sizeof(*cache_resource) + actions_len, 0);
+ cache_resource = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*cache_resource) + actions_len, 0,
+ SOCKET_ID_ANY);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
(sh->ctx, ns, cache_resource,
actions_len, &cache_resource->action);
if (ret) {
- rte_free(cache_resource);
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
MLX5_COUNTERS_PER_POOL +
sizeof(struct mlx5_counter_stats_raw)) * raws_n +
sizeof(struct mlx5_counter_stats_mem_mng);
- uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
+ uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, sysconf(_SC_PAGESIZE),
+ SOCKET_ID_ANY);
int i;
if (!mem) {
IBV_ACCESS_LOCAL_WRITE);
if (!mem_mng->umem) {
rte_errno = errno;
- rte_free(mem);
+ mlx5_free(mem);
return NULL;
}
mkey_attr.addr = (uintptr_t)mem;
if (!mem_mng->dm) {
mlx5_glue->devx_umem_dereg(mem_mng->umem);
rte_errno = errno;
- rte_free(mem);
+ mlx5_free(mem);
return NULL;
}
mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
void *old_pools = cont->pools;
uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
- void *pools = rte_calloc(__func__, 1, mem_size, 0);
+ void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
if (!pools) {
rte_errno = ENOMEM;
mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
if (!mem_mng) {
- rte_free(pools);
+ mlx5_free(pools);
return -ENOMEM;
}
for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
cont->pools = pools;
rte_spinlock_unlock(&cont->resize_sl);
if (old_pools)
- rte_free(old_pools);
+ mlx5_free(old_pools);
return 0;
}
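The container resize above is an allocate-copy-swap-free sequence: the new pools array is allocated first, the live pointer is swapped under cont->resize_sl, and the old array is released only after the unlock. A condensed sketch that reconstructs the copy step elided from the hunk; new_size and old_size are placeholders:

	void *old_arr = cont->pools;
	void *new_arr = mlx5_malloc(MLX5_MEM_ZERO, new_size, 0,
				    SOCKET_ID_ANY);

	if (new_arr == NULL)
		return -ENOMEM;
	rte_spinlock_lock(&cont->resize_sl);
	if (old_arr != NULL)
		memcpy(new_arr, old_arr, old_size); /* keep existing pools */
	cont->pools = new_arr;
	rte_spinlock_unlock(&cont->resize_sl);
	if (old_arr != NULL)
		mlx5_free(old_arr); /* release only after the swap */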
size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
- pool = rte_calloc(__func__, 1, size, 0);
+ pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
if (!pool) {
rte_errno = ENOMEM;
return NULL;
}
}
/* Register new matcher. */
- cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
+ cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0,
+ SOCKET_ID_ANY);
if (!cache_matcher) {
flow_dv_tbl_resource_release(dev, tbl);
return rte_flow_error_set(error, ENOMEM,
ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
&cache_matcher->matcher_object);
if (ret) {
- rte_free(cache_matcher);
+ mlx5_free(cache_matcher);
#ifdef HAVE_MLX5DV_DR
flow_dv_tbl_resource_release(dev, tbl);
#endif
ret = mlx5_flow_os_create_flow_action_tag(tag_be24,
&cache_resource->action);
if (ret) {
- rte_free(cache_resource);
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
rte_atomic32_inc(&cache_resource->refcnt);
if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
mlx5_flow_os_destroy_flow_action(cache_resource->action);
- rte_free(cache_resource);
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, EEXIST,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot insert tag");
LIST_REMOVE(matcher, next);
/* table ref-- in release interface. */
flow_dv_tbl_resource_release(dev, matcher->tbl);
- rte_free(matcher);
+ mlx5_free(matcher);
DRV_LOG(DEBUG, "port %u matcher %p: removed",
dev->data->port_id, (void *)matcher);
return 0;
claim_zero(mlx5_flow_os_destroy_flow_action
(cache_resource->action));
LIST_REMOVE(cache_resource, next);
- rte_free(cache_resource);
+ mlx5_free(cache_resource);
DRV_LOG(DEBUG, "modify-header resource %p: removed",
(void *)cache_resource);
return 0;
flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
if (mtd->drop_actn)
claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
- rte_free(mtd);
+ mlx5_free(mtd);
return 0;
}
rte_errno = ENOTSUP;
return NULL;
}
- mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
+ mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
if (!mtb) {
DRV_LOG(ERR, "Failed to allocate memory for meter.");
return NULL;
#include <rte_mtr_driver.h>
#include <mlx5_devx_cmds.h>
+#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_flow.h"
if (ret)
return ret;
/* Meter profile memory allocation. */
- fmp = rte_calloc(__func__, 1, sizeof(struct mlx5_flow_meter_profile),
- RTE_CACHE_LINE_SIZE);
+ fmp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_flow_meter_profile),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (fmp == NULL)
return -rte_mtr_error_set(error, ENOMEM,
RTE_MTR_ERROR_TYPE_UNSPECIFIED,
TAILQ_INSERT_TAIL(fmps, fmp, next);
return 0;
error:
- rte_free(fmp);
+ mlx5_free(fmp);
return ret;
}
NULL, "Meter profile is in use.");
/* Remove from list. */
TAILQ_REMOVE(&priv->flow_meter_profiles, fmp, next);
- rte_free(fmp);
+ mlx5_free(fmp);
return 0;
}
MLX5_ASSERT(!fmp->ref_cnt);
/* Remove from list. */
TAILQ_REMOVE(&priv->flow_meter_profiles, fmp, next);
- rte_free(fmp);
+ mlx5_free(fmp);
}
return 0;
}
#include <mlx5_glue.h>
#include <mlx5_prm.h>
+#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
/* Resize the container pool array. */
size = sizeof(struct mlx5_flow_counter_pool *) *
(n_valid + MLX5_CNT_CONTAINER_RESIZE);
- pools = rte_zmalloc(__func__, size, 0);
+ pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
+ SOCKET_ID_ANY);
if (!pools)
return 0;
if (n_valid) {
memcpy(pools, cont->pools,
sizeof(struct mlx5_flow_counter_pool *) *
n_valid);
- rte_free(cont->pools);
+ mlx5_free(cont->pools);
}
cont->pools = pools;
cont->n += MLX5_CNT_CONTAINER_RESIZE;
	/* Allocate memory for new pool. */
size = sizeof(*pool) + (sizeof(*cnt_ext) + sizeof(*cnt)) *
MLX5_COUNTERS_PER_POOL;
- pool = rte_calloc(__func__, 1, size, 0);
+ pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
if (!pool)
return 0;
pool->type |= CNT_POOL_TYPE_EXT;
#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>
+#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_rxtx.h"
}
}
exit:
- free(mp_rep.msgs);
+ mlx5_free(mp_rep.msgs);
}
/**
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
+#include <mlx5_malloc.h>
+
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
rte_errno = EINVAL;
return -rte_errno;
}
- priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key,
- rss_conf->rss_key_len, 0);
+ priv->rss_conf.rss_key = mlx5_realloc(priv->rss_conf.rss_key,
+ MLX5_MEM_RTE,
+ rss_conf->rss_key_len,
+ 0, SOCKET_ID_ANY);
if (!priv->rss_conf.rss_key) {
rte_errno = ENOMEM;
return -rte_errno;
if (priv->reta_idx_n == reta_size)
return 0;
- mem = rte_realloc(priv->reta_idx,
- reta_size * sizeof((*priv->reta_idx)[0]), 0);
+ mem = mlx5_realloc(priv->reta_idx, MLX5_MEM_RTE,
+ reta_size * sizeof((*priv->reta_idx)[0]), 0,
+ SOCKET_ID_ANY);
if (!mem) {
rte_errno = ENOMEM;
return -rte_errno;
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
+#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
if (!dev->data->dev_conf.intr_conf.rxq)
return 0;
mlx5_rx_intr_vec_disable(dev);
- intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
+ intr_handle->intr_vec = mlx5_malloc(0,
+ n * sizeof(intr_handle->intr_vec[0]),
+ 0, SOCKET_ID_ANY);
if (intr_handle->intr_vec == NULL) {
DRV_LOG(ERR,
"port %u failed to allocate memory for interrupt"
free:
rte_intr_free_epoll_fd(intr_handle);
if (intr_handle->intr_vec)
- free(intr_handle->intr_vec);
+ mlx5_free(intr_handle->intr_vec);
intr_handle->nb_efd = 0;
intr_handle->intr_vec = NULL;
}
struct mlx5_ind_table_obj *ind_tbl;
unsigned int i = 0, j = 0, k = 0;
- ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
- queues_n * sizeof(uint16_t), 0);
+ ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
+ queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
if (!ind_tbl) {
rte_errno = ENOMEM;
return NULL;
log2above(queues_n) :
log2above(priv->config.ind_table_max_size));
- rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
- rqt_n * sizeof(uint32_t), 0);
+ rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
+ rqt_n * sizeof(uint32_t), 0,
+ SOCKET_ID_ANY);
if (!rqt_attr) {
DRV_LOG(ERR, "port %u cannot allocate RQT resources",
dev->data->port_id);
rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
rqt_attr);
- rte_free(rqt_attr);
+ mlx5_free(rqt_attr);
if (!ind_tbl->rqt) {
DRV_LOG(ERR, "port %u cannot create DevX RQT",
dev->data->port_id);
error:
for (j = 0; j < i; j++)
mlx5_rxq_release(dev, ind_tbl->queues[j]);
- rte_free(ind_tbl);
+ mlx5_free(ind_tbl);
DEBUG("port %u cannot create indirection table", dev->data->port_id);
return NULL;
}
claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
LIST_REMOVE(ind_tbl, next);
- rte_free(ind_tbl);
+ mlx5_free(ind_tbl);
return 0;
}
return 1;
rte_errno = errno;
goto error;
}
- rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
+ rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
if (!rxq) {
DEBUG("port %u cannot allocate drop Rx queue memory",
dev->data->port_id);
claim_zero(mlx5_glue->destroy_wq(rxq->wq));
if (rxq->cq)
claim_zero(mlx5_glue->destroy_cq(rxq->cq));
- rte_free(rxq);
+ mlx5_free(rxq);
priv->drop_queue.rxq = NULL;
}
rte_errno = errno;
goto error;
}
- ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
+ ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl), 0,
+ SOCKET_ID_ANY);
if (!ind_tbl) {
rte_errno = ENOMEM;
goto error;
claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
mlx5_rxq_obj_drop_release(dev);
- rte_free(ind_tbl);
+ mlx5_free(ind_tbl);
priv->drop_queue.hrxq->ind_table = NULL;
}
rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
return priv->drop_queue.hrxq;
}
- hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
+ hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
if (!hrxq) {
DRV_LOG(WARNING,
"port %u cannot allocate memory for drop queue",
mlx5_ind_table_obj_drop_release(dev);
if (hrxq) {
priv->drop_queue.hrxq = NULL;
- rte_free(hrxq);
+ mlx5_free(hrxq);
}
return NULL;
}
#endif
claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
mlx5_ind_table_obj_drop_release(dev);
- rte_free(hrxq);
+ mlx5_free(hrxq);
priv->drop_queue.hrxq = NULL;
}
}
#include <rte_malloc.h>
#include <rte_hash_crc.h>
+#include <mlx5_malloc.h>
+
#include "mlx5_utils.h"
struct mlx5_hlist *
alloc_size = sizeof(struct mlx5_hlist) +
sizeof(struct mlx5_hlist_head) * act_size;
	/* Using zeroed memory, then no need to initialize the heads. */
- h = rte_zmalloc(name, alloc_size, RTE_CACHE_LINE_SIZE);
+ h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
if (!h) {
DRV_LOG(ERR, "No memory for hash list %s creation",
name ? name : "None");
if (cb)
cb(entry, ctx);
else
- rte_free(entry);
+ mlx5_free(entry);
}
}
- rte_free(h);
+ mlx5_free(h);
}
static inline void
(cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
return NULL;
- pool = rte_zmalloc("mlx5_ipool", sizeof(*pool) + cfg->grow_trunk *
- sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE);
+ pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
+ sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
if (!pool)
return NULL;
pool->cfg = *cfg;
if (!pool->cfg.trunk_size)
pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
if (!cfg->malloc && !cfg->free) {
- pool->cfg.malloc = rte_malloc_socket;
- pool->cfg.free = rte_free;
+ pool->cfg.malloc = mlx5_malloc;
+ pool->cfg.free = mlx5_free;
}
pool->free_list = TRUNK_INVALID;
if (pool->cfg.need_lock)
int n_grow = pool->n_trunk_valid ? pool->n_trunk :
RTE_CACHE_LINE_SIZE / sizeof(void *);
- p = pool->cfg.malloc(pool->cfg.type,
- (pool->n_trunk_valid + n_grow) *
- sizeof(struct mlx5_indexed_trunk *),
- RTE_CACHE_LINE_SIZE, rte_socket_id());
+ p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
+ sizeof(struct mlx5_indexed_trunk *),
+ RTE_CACHE_LINE_SIZE, rte_socket_id());
if (!p)
return -ENOMEM;
if (pool->trunks)
/* rte_bitmap requires memory cacheline aligned. */
trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
trunk_size += bmp_size;
- trunk = pool->cfg.malloc(pool->cfg.type, trunk_size,
+ trunk = pool->cfg.malloc(0, trunk_size,
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (!trunk)
return -ENOMEM;
	if (pool->trunks)
pool->cfg.free(pool->trunks);
mlx5_ipool_unlock(pool);
- rte_free(pool);
+ mlx5_free(pool);
return 0;
}
.grow_shift = 1,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
};
if (type >= MLX5_L3T_TYPE_MAX) {
rte_errno = EINVAL;
return NULL;
}
- tbl = rte_zmalloc(NULL, sizeof(struct mlx5_l3t_tbl), 1);
+ tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
+ SOCKET_ID_ANY);
if (!tbl) {
rte_errno = ENOMEM;
return NULL;
tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
if (!tbl->eip) {
rte_errno = ENOMEM;
- rte_free(tbl);
+ mlx5_free(tbl);
tbl = NULL;
}
return tbl;
break;
}
MLX5_ASSERT(!m_tbl->ref_cnt);
- rte_free(g_tbl->tbl[i]);
+ mlx5_free(g_tbl->tbl[i]);
g_tbl->tbl[i] = 0;
if (!(--g_tbl->ref_cnt))
break;
}
MLX5_ASSERT(!g_tbl->ref_cnt);
- rte_free(tbl->tbl);
+ mlx5_free(tbl->tbl);
tbl->tbl = 0;
}
mlx5_ipool_destroy(tbl->eip);
- rte_free(tbl);
+ mlx5_free(tbl);
}
uint32_t
m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
NULL;
if (!(--m_tbl->ref_cnt)) {
- rte_free(m_tbl);
+ mlx5_free(m_tbl);
g_tbl->tbl
[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] = NULL;
if (!(--g_tbl->ref_cnt)) {
- rte_free(g_tbl);
+ mlx5_free(g_tbl);
tbl->tbl = 0;
}
}
/* Check the global table, create it if empty. */
g_tbl = tbl->tbl;
if (!g_tbl) {
- g_tbl = rte_zmalloc(NULL, sizeof(struct mlx5_l3t_level_tbl) +
- sizeof(void *) * MLX5_L3T_GT_SIZE, 1);
+ g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5_l3t_level_tbl) +
+ sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
+ SOCKET_ID_ANY);
if (!g_tbl) {
rte_errno = ENOMEM;
return -1;
*/
m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
if (!m_tbl) {
- m_tbl = rte_zmalloc(NULL, sizeof(struct mlx5_l3t_level_tbl) +
- sizeof(void *) * MLX5_L3T_MT_SIZE, 1);
+ m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5_l3t_level_tbl) +
+ sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
+ SOCKET_ID_ANY);
if (!m_tbl) {
rte_errno = ENOMEM;
return -1;
/* Lock is needed for multiple thread usage. */
	uint32_t release_mem_en:1; /* Release trunk when it is free. */
const char *type; /* Memory allocate type name. */
- void *(*malloc)(const char *type, size_t size, unsigned int align,
+	void *(*malloc)(uint32_t flags, size_t size, size_t align,
int socket);
/* User defined memory allocator. */
void (*free)(void *addr); /* User defined memory release. */
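With the hook now taking a flags word instead of a type string (and a size_t align to match mlx5_malloc), a user-supplied allocator has to conform to the same shape. A purely illustrative custom hook; debug_ipool_malloc is not part of this patch:

	static void *
	debug_ipool_malloc(uint32_t flags, size_t size, size_t align,
			   int socket)
	{
		void *addr = mlx5_malloc(flags, size, align, socket);

		DRV_LOG(DEBUG, "ipool alloc %zu bytes -> %p", size, addr);
		return addr;
	}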
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_nl.h>
+#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_autoconf.h"
*/
return NULL;
}
- vmwa = rte_zmalloc(__func__, sizeof(*vmwa), sizeof(uint32_t));
+ vmwa = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*vmwa), sizeof(uint32_t),
+ SOCKET_ID_ANY);
if (!vmwa) {
DRV_LOG(WARNING,
"Can not allocate memory"
DRV_LOG(WARNING,
"Can not create Netlink socket"
" for VLAN workaround context");
- rte_free(vmwa);
+ mlx5_free(vmwa);
return NULL;
}
vmwa->vf_ifindex = ifindex;
}
if (vmwa->nl_socket >= 0)
close(vmwa->nl_socket);
- rte_free(vmwa);
+ mlx5_free(vmwa);
}