Use generic memory management calls instead of Unix-specific memory
management calls in mempool.
Signed-off-by: Fady Bader <fady@mellanox.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
Reviewed-by: Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>
#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>
#include <rte_function_versioning.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>
#include <rte_function_versioning.h>
+#include <rte_eal_paging.h>
+
#include "rte_mempool.h"
#include "rte_mempool_trace.h"
#include "rte_mempool.h"
#include "rte_mempool_trace.h"
rte_memseg_list_walk(find_min_pagesz, &wa);
rte_memseg_list_walk(find_min_pagesz, &wa);
- return wa.min == SIZE_MAX ? (size_t) getpagesize() : wa.min;
+ return wa.min == SIZE_MAX ? (size_t) rte_mem_page_size() : wa.min;
else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
*pg_sz = get_min_page_size(mp->socket_id);
else
else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
*pg_sz = get_min_page_size(mp->socket_id);
else
- *pg_sz = getpagesize();
+ *pg_sz = rte_mem_page_size();
rte_mempool_trace_get_page_size(mp, *pg_sz);
return 0;
rte_mempool_trace_get_page_size(mp, *pg_sz);
return 0;
size_t min_chunk_size;
size_t align;
size_t min_chunk_size;
size_t align;
+ pg_sz = rte_mem_page_size();
pg_shift = rte_bsf32(pg_sz);
size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
&min_chunk_size, &align);
pg_shift = rte_bsf32(pg_sz);
size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
&min_chunk_size, &align);
+ rte_mem_unmap(opaque, size);
}
/* populate the mempool with an anonymous mapping */
}
/* populate the mempool with an anonymous mapping */
}
/* get chunk of virtually continuous memory */
}
/* get chunk of virtually continuous memory */
- addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, -1, 0);
- if (addr == MAP_FAILED) {
- rte_errno = errno;
+ addr = rte_mem_map(NULL, size, RTE_PROT_READ | RTE_PROT_WRITE,
+ RTE_MAP_SHARED | RTE_MAP_ANONYMOUS, -1, 0);
+ if (addr == NULL)
/* can't use MMAP_LOCKED, it does not exist on BSD */
/* can't use MMAP_LOCKED, it does not exist on BSD */
- if (mlock(addr, size) < 0) {
- rte_errno = errno;
- munmap(addr, size);
+ if (rte_mem_lock(addr, size) < 0) {
+ rte_mem_unmap(addr, size);
- ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(),
+ ret = rte_mempool_populate_virt(mp, addr, size, rte_mem_page_size(),
rte_mempool_memchunk_anon_free, addr);
if (ret == 0) /* should not happen */
ret = -ENOBUFS;
rte_mempool_memchunk_anon_free, addr);
if (ret == 0) /* should not happen */
ret = -ENOBUFS;