The following Linux calls are replaced by their matching rte_* APIs:
mmap ==> rte_mem_map()
munmap ==> rte_mem_unmap()
sysconf(_SC_PAGESIZE) ==> rte_mem_page_size()
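For reference, a minimal usage sketch of the replacement APIs from
<rte_eal_paging.h> (not part of the diff below; map_one_page(),
unmap_one_page() and the fd argument are illustrative only):

    #include <rte_eal_paging.h>

    /* Hypothetical helper: map one page of "fd" read/write, shared. */
    static void *
    map_one_page(int fd)
    {
            size_t pgsize = rte_mem_page_size();
            void *addr;

            if (pgsize == (size_t)-1)
                    return NULL; /* Page size could not be determined. */
            /* Unlike mmap(), rte_mem_map() returns NULL on failure. */
            addr = rte_mem_map(NULL, pgsize,
                               RTE_PROT_READ | RTE_PROT_WRITE,
                               RTE_MAP_SHARED, fd, 0);
            return addr;
    }

    /* Release the mapping; the counterpart of munmap(). */
    static void
    unmap_one_page(void *addr, size_t pgsize)
    {
            rte_mem_unmap(addr, pgsize);
    }

Note that rte_mem_map() reports failure by returning NULL rather than
MAP_FAILED, and rte_mem_page_size() reports failure as (size_t)-1, which
is why the error checks in the hunks below test for those values.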
Signed-off-by: Ophir Munk <ophirmu@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
#include <rte_errno.h>
#include <rte_malloc.h>
+#include <rte_eal_paging.h>
#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
return NULL;
}
memset(in, 0, in_size_dw * 4);
- pgsize = sysconf(_SC_PAGESIZE);
+ pgsize = rte_mem_page_size();
+ if (pgsize == (size_t)-1) {
+ mlx5_free(mkey);
+ DRV_LOG(ERR, "Failed to get page size");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
if (klm_num > 0) {
#define MLX5_MAX_LOG_RQ_SEGS 5u
/* The alignment needed for WQ buffer. */
-#define MLX5_WQE_BUF_ALIGNMENT sysconf(_SC_PAGESIZE)
+#define MLX5_WQE_BUF_ALIGNMENT rte_mem_page_size()
/* The alignment needed for CQ buffer. */
-#define MLX5_CQE_BUF_ALIGNMENT sysconf(_SC_PAGESIZE)
+#define MLX5_CQE_BUF_ALIGNMENT rte_mem_page_size()
/* Completion mode. */
enum mlx5_completion_mode {
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
-#include <sys/mman.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>
+#include <rte_eal_paging.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
* Verbs callback to allocate a memory. This function should allocate the space
* according to the size provided residing inside a huge page.
* Please note that all allocation must respect the alignment from libmlx5
- * (i.e. currently sysconf(_SC_PAGESIZE)).
+ * (i.e. currently rte_mem_page_size()).
*
* @param[in] size
* The size in bytes of the memory to allocate.
{
struct mlx5_priv *priv = data;
void *ret;
- size_t alignment = sysconf(_SC_PAGESIZE);
unsigned int socket = SOCKET_ID_ANY;
+ size_t alignment = rte_mem_page_size();
+ if (alignment == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
#include <string.h>
#include <stdint.h>
#include <unistd.h>
-#include <sys/mman.h>
#include <inttypes.h>
/* Verbs header. */
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
+#include <rte_eal_paging.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
MLX5_COUNTERS_PER_POOL +
sizeof(struct mlx5_counter_stats_raw)) * raws_n +
sizeof(struct mlx5_counter_stats_mem_mng);
- uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, sysconf(_SC_PAGESIZE),
+ size_t pgsize = rte_mem_page_size();
+ if (pgsize == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize,
SOCKET_ID_ANY);
int i;
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_io.h>
+#include <rte_eal_paging.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
/* Calculate and allocate WQ memory space. */
wqe_size = 1 << log_wqe_size; /* round up power of two.*/
wq_size = wqe_n * wqe_size;
+ size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
+ if (alignment == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
- MLX5_WQE_BUF_ALIGNMENT, rxq_ctrl->socket);
+ alignment, rxq_ctrl->socket);
if (!buf)
return NULL;
rxq_data->wqes = buf;
#include <rte_alarm.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
+#include <rte_eal_paging.h>
#include <mlx5_malloc.h>
struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
struct mlx5_devx_cq_attr cq_attr = { 0 };
struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
- size_t page_size = sysconf(_SC_PAGESIZE);
+ size_t page_size;
uint32_t umem_size, umem_dbrec;
int ret;
+ page_size = rte_mem_page_size();
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ return -ENOMEM;
+ }
/* Allocate memory buffer for CQEs and doorbell record. */
umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
struct mlx5_devx_cq_attr cq_attr = { 0 };
struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
- size_t page_size = sysconf(_SC_PAGESIZE);
+ size_t page_size;
uint32_t umem_size, umem_dbrec;
int ret;
+ page_size = rte_mem_page_size();
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ return -ENOMEM;
+ }
sh->txpp.tsa = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
MLX5_TXPP_REARM_SQ_SIZE *
sizeof(struct mlx5_txpp_ts),
#include <string.h>
#include <stdint.h>
#include <unistd.h>
-#include <sys/mman.h>
#include <inttypes.h>
/* Verbs header. */
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>
+#include <rte_eal_paging.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
{
struct mlx5_priv *priv = txq_ctrl->priv;
struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
- const size_t page_size = sysconf(_SC_PAGESIZE);
#ifndef RTE_ARCH_64
unsigned int lock_idx;
#endif
+ const size_t page_size = rte_mem_page_size();
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ }
if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
return;
void *addr;
uintptr_t uar_va;
uintptr_t offset;
- const size_t page_size = sysconf(_SC_PAGESIZE);
+ const size_t page_size = rte_mem_page_size();
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
return 0;
*/
uar_va = (uintptr_t)txq_ctrl->bf_reg;
offset = uar_va & (page_size - 1); /* Offset in page. */
- addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
- txq_ctrl->uar_mmap_offset);
- if (addr == MAP_FAILED) {
+ addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
+ fd, txq_ctrl->uar_mmap_offset);
+ if (!addr) {
DRV_LOG(ERR,
"port %u mmap failed for BF reg of txq %u",
txq->port_id, txq->idx);
txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
{
struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
- const size_t page_size = sysconf(_SC_PAGESIZE);
void *addr;
+ const size_t page_size = rte_mem_page_size();
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ }
if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
return;
addr = ppriv->uar_table[txq_ctrl->txq.idx];
- munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
+ rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
}
/**
struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
struct mlx5_devx_cq_attr cq_attr = { 0 };
struct mlx5_txq_obj *txq_obj = NULL;
- size_t page_size = sysconf(_SC_PAGESIZE);
+ size_t page_size;
struct mlx5_cqe *cqe;
uint32_t i, nqe;
+ size_t alignment = (size_t)-1;
int ret = 0;
MLX5_ASSERT(txq_data);
MLX5_ASSERT(!txq_ctrl->obj);
+ page_size = rte_mem_page_size();
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
sizeof(struct mlx5_txq_obj), 0,
txq_ctrl->socket);
goto error;
}
/* Allocate memory buffer for CQEs. */
+ alignment = MLX5_CQE_BUF_ALIGNMENT;
+ if (alignment == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ goto error;
+ }
txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
nqe * sizeof(struct mlx5_cqe),
- MLX5_CQE_BUF_ALIGNMENT,
+ alignment,
sh->numa_node);
if (!txq_obj->cq_buf) {
DRV_LOG(ERR,