#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_mp.h>
+#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
/* Flow memory reclaim mode. */
#define MLX5_RECLAIM_MEM "reclaim_mem_mode"
+/* Device parameter to enable system memory as the default PMD allocator. */
+#define MLX5_SYS_MEM_EN "sys_mem_en"
+
static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
/* Shared memory between primary and secondary processes. */
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_encap_decap_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_push_vlan_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_tag_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_port_id_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_jump_ipool",
},
#endif
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_meter_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_mcp_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_hrxq_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_flow_handle_ipool",
},
{
.trunk_size = 4096,
.need_lock = 1,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "rte_flow_ipool",
},
};
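All the ipool back ends above are switched from rte_malloc_socket()/rte_free() to the PMD's own wrappers, so the new "sys_mem_en" devarg can steer them to system memory. Below is a minimal, hedged sketch of the dispatch idea; the flag names and values are illustrative only, and the real implementation (mlx5_malloc.c) additionally tracks allocation statistics:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <rte_malloc.h>

/* Illustrative flag values; the real flags are defined in mlx5_malloc.h. */
#define SKETCH_MEM_RTE	(1u << 1) /* force rte memory for this call */
#define SKETCH_MEM_ZERO	(1u << 2) /* return zeroed memory */

static bool sketch_sys_mem_en; /* set from the "sys_mem_en" devarg */

static void *
sketch_malloc(uint32_t flags, size_t size, unsigned int align, int socket)
{
	if (sketch_sys_mem_en && !(flags & SKETCH_MEM_RTE)) {
		void *buf = NULL;

		/* align must satisfy posix_memalign() constraints here. */
		if (align) {
			if (posix_memalign(&buf, align, size) != 0)
				return NULL;
		} else {
			buf = malloc(size);
		}
		if (buf == NULL)
			return NULL;
		if (flags & SKETCH_MEM_ZERO)
			memset(buf, 0, size);
		return buf;
	}
	return (flags & SKETCH_MEM_ZERO) ?
	       rte_zmalloc_socket(NULL, size, align, socket) :
	       rte_malloc_socket(NULL, size, align, socket);
}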
struct mlx5_flow_id_pool *pool;
void *mem;
- pool = rte_zmalloc("id pool allocation", sizeof(*pool),
- RTE_CACHE_LINE_SIZE);
+ pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (!pool) {
DRV_LOG(ERR, "can't allocate id pool");
rte_errno = ENOMEM;
return NULL;
}
- mem = rte_zmalloc("", MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
+ mem = mlx5_malloc(MLX5_MEM_ZERO,
+ MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (!mem) {
DRV_LOG(ERR, "can't allocate mem for id pool");
rte_errno = ENOMEM;
pool->max_id = max_id;
return pool;
error:
- rte_free(pool);
+ mlx5_free(pool);
return NULL;
}
void
mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool)
{
- rte_free(pool->free_arr);
- rte_free(pool);
+ mlx5_free(pool->free_arr);
+ mlx5_free(pool);
}
/**
size = pool->curr - pool->free_arr;
size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
MLX5_ASSERT(size2 > size);
- mem = rte_malloc("", size2 * sizeof(uint32_t), 0);
+ mem = mlx5_malloc(0, size2 * sizeof(uint32_t), 0,
+ SOCKET_ID_ANY);
if (!mem) {
DRV_LOG(ERR, "can't allocate mem for id pool");
rte_errno = ENOMEM;
return -rte_errno;
}
memcpy(mem, pool->free_arr, size * sizeof(uint32_t));
- rte_free(pool->free_arr);
+ mlx5_free(pool->free_arr);
pool->free_arr = mem;
pool->curr = pool->free_arr + size;
pool->last = pool->free_arr + size2;
LIST_REMOVE(mng, next);
claim_zero(mlx5_devx_cmd_destroy(mng->dm));
claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
- rte_free(mem);
+ mlx5_free(mem);
}
/**
(pool, j)->dcs));
}
TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, next);
- rte_free(pool);
+ mlx5_free(pool);
pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
}
- rte_free(sh->cmng.ccont[i].pools);
+ mlx5_free(sh->cmng.ccont[i].pools);
}
mng = LIST_FIRST(&sh->cmng.mem_mngs);
while (mng) {
mlx5_ipool_destroy(sh->ipool[i]);
}
+/**
+ * Check whether the dynamic flex parser for eCPRI already exists.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ *   True if the parser exists, false otherwise.
+ */
+bool
+mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flex_parser_profiles *prf =
+ &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+
+ return !!prf->obj;
+}
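A hedged usage sketch (the helper name is hypothetical): the existence check lets callers, e.g. the flow translation path, create the parser lazily on the first eCPRI rule and then reuse the shared object:

/* Hypothetical caller: create the eCPRI flex parser lazily. */
static int
sketch_ecpri_parser_ensure(struct rte_eth_dev *dev)
{
	if (mlx5_flex_parser_ecpri_exist(dev))
		return 0; /* already created, reuse the shared object */
	return mlx5_flex_parser_ecpri_alloc(dev);
}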
+
+/**
+ * Allocate a flex parser for eCPRI. Once created, the parser-related
+ * resources are held until the device is closed.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flex_parser_profiles *prf =
+ &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+ struct mlx5_devx_graph_node_attr node = {
+ .modify_field_select = 0,
+ };
+ uint32_t ids[8];
+ int ret;
+
+ if (!priv->config.hca_attr.parse_graph_flex_node) {
+ DRV_LOG(ERR, "Dynamic flex parser is not supported "
+ "for device %s.", priv->dev_data->name);
+ return -ENOTSUP;
+ }
+ node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
+ /* 8 bytes now: 4B common header + 4B message body header. */
+ node.header_length_base_value = 0x8;
+ /* After MAC layer: Ether / VLAN. */
+ node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_MAC;
+	/* The L2 condition: compare the Ethertype, 0xAEFE for eCPRI. */
+ node.in[0].compare_condition_value = RTE_ETHER_TYPE_ECPRI;
+ /* Sample #0: type in common header. */
+ node.sample[0].flow_match_sample_en = 1;
+ /* Fixed offset. */
+ node.sample[0].flow_match_sample_offset_mode = 0x0;
+ /* Only the 2nd byte will be used. */
+ node.sample[0].flow_match_sample_field_base_offset = 0x0;
+ /* Sample #1: message payload. */
+ node.sample[1].flow_match_sample_en = 1;
+ /* Fixed offset. */
+ node.sample[1].flow_match_sample_offset_mode = 0x0;
+ /*
+	 * Only the first two bytes are used right now; the offset starts
+	 * right after the one-DWORD (4-byte) common header.
+ */
+ node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);
+ prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->ctx, &node);
+ if (!prf->obj) {
+ DRV_LOG(ERR, "Failed to create flex parser node object.");
+ return (rte_errno == 0) ? -ENODEV : -rte_errno;
+ }
+ prf->num = 2;
+ ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to query sample IDs.");
+		/* Drop the half-initialized node: "exist" must not be true. */
+		mlx5_devx_cmd_destroy(prf->obj);
+		prf->obj = NULL;
+		return (rte_errno == 0) ? -ENODEV : -rte_errno;
+	}
+ prf->offset[0] = 0x0;
+ prf->offset[1] = sizeof(uint32_t);
+ prf->ids[0] = ids[0];
+ prf->ids[1] = ids[1];
+ return 0;
+}
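For reference, the two enabled samples cover the first eight bytes of an eCPRI PDU. The struct below only illustrates that layout (fields are big-endian on the wire) and is not part of the patch:

struct sketch_ecpri_hdr {
	uint8_t  rev_resv_c; /* 4-bit revision, 3 reserved bits, C bit */
	uint8_t  type;       /* message type: byte 1, matched by sample #0 */
	uint16_t size;       /* payload size in bytes */
	uint32_t body;       /* first DW of the message body: sample #1,
			      * base offset sizeof(uint32_t) == 4
			      */
};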
+
+/**
+ * Destroy the flex parser node, including the parser itself and its input /
+ * output arcs and DW samples. The hardware resources can then be reused.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flex_parser_profiles *prf =
+ &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+
+ if (prf->obj)
+ mlx5_devx_cmd_destroy(prf->obj);
+ prf->obj = NULL;
+}
+
/**
* Allocate shared device context. If there is multiport device the
* master and representors will share this context, if there is single
err = ENOMEM;
goto error;
}
+ sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, 0);
+ if (!sh->tx_uar) {
+ DRV_LOG(ERR, "Failed to allocate DevX UAR.");
+ err = ENOMEM;
+ goto error;
+ }
}
sh->flow_id_pool = mlx5_flow_id_pool_alloc
((1 << HAIRPIN_FLOW_ID_BITS) - 1);
err = ENOMEM;
goto error;
}
+#ifndef RTE_ARCH_64
+	/* Initialize UAR access locks for 32-bit implementations. */
+ rte_spinlock_init(&sh->uar_lock_cq);
+ for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+ rte_spinlock_init(&sh->uar_lock[i]);
+#endif
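The rationale for these locks: on 32-bit targets a 64-bit UAR/doorbell write is performed as two 32-bit stores, so unsynchronized writers could interleave the halves. A hedged sketch of the kind of helper the locks protect (names are illustrative; the PMD's actual helper is defined elsewhere in the driver):

#include <stdint.h>
#include <rte_spinlock.h>

static inline void
sketch_uar_write64(uint64_t val, volatile void *addr, rte_spinlock_t *lock)
{
	rte_spinlock_lock(lock);
	/* Two 32-bit MMIO stores; the lock keeps the pair atomic.
	 * Endianness handling of real doorbell values is omitted.
	 */
	((volatile uint32_t *)addr)[0] = (uint32_t)val;
	((volatile uint32_t *)addr)[1] = (uint32_t)(val >> 32);
	rte_spinlock_unlock(lock);
}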
/*
* Once the device is added to the list of memory event
* callback, its global MR cache table cannot be expanded
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
return sh;
error:
+ pthread_mutex_destroy(&sh->txpp.mutex);
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
MLX5_ASSERT(sh);
if (sh->cnt_id_tbl) {
mlx5_l3t_destroy(sh->cnt_id_tbl);
sh->cnt_id_tbl = NULL;
}
+ if (sh->tx_uar) {
+ mlx5_glue->devx_free_uar(sh->tx_uar);
+ sh->tx_uar = NULL;
+ }
if (sh->tis)
claim_zero(mlx5_devx_cmd_destroy(sh->tis));
if (sh->td)
mlx5_l3t_destroy(sh->cnt_id_tbl);
sh->cnt_id_tbl = NULL;
}
+ if (sh->tx_uar) {
+ mlx5_glue->devx_free_uar(sh->tx_uar);
+ sh->tx_uar = NULL;
+ }
if (sh->pd)
claim_zero(mlx5_glue->dealloc_pd(sh->pd));
if (sh->tis)
claim_zero(mlx5_glue->close_device(sh->ctx));
if (sh->flow_id_pool)
mlx5_flow_id_pool_release(sh->flow_id_pool);
+ pthread_mutex_destroy(&sh->txpp.mutex);
rte_free(sh);
exit:
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
entry);
MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
- rte_free(tbl_data);
+ mlx5_free(tbl_data);
}
table_key.direction = 1;
pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
entry);
MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
- rte_free(tbl_data);
+ mlx5_free(tbl_data);
}
table_key.direction = 0;
table_key.domain = 1;
entry);
MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
- rte_free(tbl_data);
+ mlx5_free(tbl_data);
}
mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
}
.direction = 0,
}
};
- struct mlx5_flow_tbl_data_entry *tbl_data = rte_zmalloc(NULL,
- sizeof(*tbl_data), 0);
+ struct mlx5_flow_tbl_data_entry *tbl_data = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*tbl_data), 0,
+ SOCKET_ID_ANY);
if (!tbl_data) {
err = ENOMEM;
rte_atomic32_init(&tbl_data->tbl.refcnt);
rte_atomic32_inc(&tbl_data->tbl.refcnt);
table_key.direction = 1;
- tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+ tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
+ SOCKET_ID_ANY);
if (!tbl_data) {
err = ENOMEM;
goto error;
rte_atomic32_inc(&tbl_data->tbl.refcnt);
table_key.direction = 0;
table_key.domain = 1;
- tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+ tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
+ SOCKET_ID_ANY);
if (!tbl_data) {
err = ENOMEM;
goto error;
rte_wmb();
/* Disable datapath on secondary process. */
mlx5_mp_req_stop_rxtx(dev);
+ /* Free the eCPRI flex parser resource. */
+ mlx5_flex_parser_ecpri_release(dev);
if (priv->rxqs != NULL) {
/* XXX race condition if mlx5_rx_burst() is still running. */
usleep(1000);
mlx5_mprq_free_mp(dev);
mlx5_os_free_shared_dr(priv);
if (priv->rss_conf.rss_key != NULL)
- rte_free(priv->rss_conf.rss_key);
+ mlx5_free(priv->rss_conf.rss_key);
if (priv->reta_idx != NULL)
- rte_free(priv->reta_idx);
+ mlx5_free(priv->reta_idx);
if (priv->config.vf)
mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
dev->data->mac_addrs,
return -rte_errno;
}
config->reclaim_mode = tmp;
+ } else if (strcmp(MLX5_SYS_MEM_EN, key) == 0) {
+ config->sys_mem_en = !!tmp;
} else {
DRV_LOG(WARNING, "%s: unknown parameter", key);
rte_errno = EINVAL;
MLX5_CLASS_ARG_NAME,
MLX5_HP_BUF_SIZE,
MLX5_RECLAIM_MEM,
+ MLX5_SYS_MEM_EN,
NULL,
};
struct rte_kvargs *kvlist;
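For illustration, the new devarg is passed like the existing mlx5 ones, e.g. with testpmd and the -w PCI device option of this release (the address is a placeholder):

    testpmd -w 0000:03:00.0,reclaim_mem_mode=1,sys_mem_en=1 -- -i

A nonzero value makes mlx5_malloc() allocate from system memory by default; per-call flags in mlx5_malloc.h can still force rte memory for objects that must stay in rte (hugepage) memory.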