#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>
+#include <sys/mman.h>
#include <rte_common.h>
#include <rte_log.h>
return (size_t)paddr_idx << pg_shift;
}
-#ifndef RTE_LIBRTE_XEN_DOM0
-/* stub if DOM0 support not configured */
-struct rte_mempool *
-rte_dom0_mempool_create(const char *name __rte_unused,
- unsigned n __rte_unused,
- unsigned elt_size __rte_unused,
- unsigned cache_size __rte_unused,
- unsigned private_data_size __rte_unused,
- rte_mempool_ctor_t *mp_init __rte_unused,
- void *mp_init_arg __rte_unused,
- rte_mempool_obj_ctor_t *obj_init __rte_unused,
- void *obj_init_arg __rte_unused,
- int socket_id __rte_unused,
- unsigned flags __rte_unused)
-{
- rte_errno = EINVAL;
- return NULL;
-}
-#endif
-
/* create the internal ring */
static int
rte_mempool_ring_create(struct rte_mempool *mp)
return -rte_errno;
mp->ring = r;
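+ /* remember the ring was created, so populate functions do not
+  * create it a second time */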
+ mp->flags |= MEMPOOL_F_RING_CREATED;
return 0;
}
* zone. Return the number of objects added, or a negative value
* on error.
*/
-static int
+int
rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque)
unsigned i = 0;
size_t off;
struct rte_mempool_memhdr *memhdr;
+ int ret;
+
+ /* create the internal ring if not already done */
+ if ((mp->flags & MEMPOOL_F_RING_CREATED) == 0) {
+ ret = rte_mempool_ring_create(mp);
+ if (ret < 0)
+ return ret;
+ }
/* mempool is already populated */
if (mp->populated_size >= mp->size)
/* Add objects in the pool, using a table of physical pages. Return the
* number of objects added, or a negative value on error.
*/
-static int
+int
rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
/* Populate the mempool with a virtual area. Return the number of
* objects added, or a negative value on error.
*/
-static int
+int
rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque)
mp->populated_size < mp->size; off += phys_len) {
paddr = rte_mem_virt2phy(addr + off);
+ /* required for xen_dom0: convert the physical address into the machine address */
+ paddr = rte_mem_phy2mch(-1, paddr);
+
if (paddr == RTE_BAD_PHYS_ADDR) {
ret = -EINVAL;
goto fail;
* and populate them. Return the number of objects added, or a negative
* value on error.
*/
-static int
+int
rte_mempool_populate_default(struct rte_mempool *mp)
{
int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
goto fail;
}
- if (rte_eal_has_hugepages())
+ /* use the memzone physical address if it is valid; under xen_dom0 the
+  * zone's physical address is not the machine address, so populate by
+  * virtual address instead */
+ if (rte_eal_has_hugepages() && !rte_xen_dom0_supported())
ret = rte_mempool_populate_phys(mp, mz->addr,
mz->phys_addr, mz->len,
rte_mempool_memchunk_mz_free,
return ret;
}
-/* free a mempool */
+/* return the memory size required for mempool objects in anonymous mem */
+static size_t
+get_anon_size(const struct rte_mempool *mp)
+{
+ size_t size, total_elt_sz, pg_sz, pg_shift;
+
+ pg_sz = getpagesize();
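+ /* the page size is a power of two, so bsf gives log2(pg_sz) */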
+ pg_shift = rte_bsf32(pg_sz);
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+ size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift);
+
+ return size;
+}
+
+/* unmap a memory zone mapped by rte_mempool_populate_anon() */
static void
+rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr,
+ void *opaque)
+{
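+ /* "opaque" is the base address returned by mmap() at populate time */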
+ munmap(opaque, get_anon_size(memhdr->mp));
+}
+
+/* populate the mempool with an anonymous mapping */
+int
+rte_mempool_populate_anon(struct rte_mempool *mp)
+{
+ size_t size;
+ int ret;
+ char *addr;
+
+ /* mempool is already populated, error */
+ if (!STAILQ_EMPTY(&mp->mem_list)) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ /* get a chunk of virtually contiguous memory */
+ size = get_anon_size(mp);
+ addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (addr == MAP_FAILED) {
+ rte_errno = errno;
+ return 0;
+ }
+ /* can't use MAP_LOCKED: that mmap flag does not exist on BSD */
+ if (mlock(addr, size) < 0) {
+ rte_errno = errno;
+ munmap(addr, size);
+ return 0;
+ }
+
+ ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(),
+ rte_mempool_memchunk_anon_free, addr);
+ if (ret < 0) {
+ rte_errno = -ret;
+ goto fail;
+ }
+
+ return mp->populated_size;
+
+ fail:
+ rte_mempool_free_memchunks(mp);
+ return 0;
+}
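+
+/*
+ * Illustrative usage sketch (comment only, not part of this patch):
+ * create an empty pool, then back it with an anonymous mapping. The
+ * pool name and sizes below are arbitrary assumptions.
+ *
+ *   struct rte_mempool *mp;
+ *
+ *   mp = rte_mempool_create_empty("anon_pool", 1024, 2048, 32, 0,
+ *           SOCKET_ID_ANY, 0);
+ *   if (mp == NULL)
+ *           rte_exit(EXIT_FAILURE, "cannot create empty mempool\n");
+ *
+ *   // returns the number of objects added; 0 means error (rte_errno set)
+ *   if (rte_mempool_populate_anon(mp) == 0)
+ *           rte_exit(EXIT_FAILURE, "cannot populate mempool\n");
+ */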
+
+/* free a mempool */
+void
rte_mempool_free(struct rte_mempool *mp)
{
struct rte_mempool_list *mempool_list = NULL;
}
/* create an empty mempool */
-static struct rte_mempool *
+struct rte_mempool *
rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
unsigned cache_size, unsigned private_data_size,
int socket_id, unsigned flags)
STAILQ_INIT(&mp->elt_list);
STAILQ_INIT(&mp->mem_list);
- if (rte_mempool_ring_create(mp) < 0)
- goto exit_unlock;
-
/*
* local_cache pointer is set even if cache_size is zero.
* The local_cache points to just past the elt_pa[] array.
{
struct rte_mempool *mp;
- if (rte_xen_dom0_supported())
- return rte_dom0_mempool_create(name, n, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags);
-
mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
private_data_size, socket_id, flags);
if (mp == NULL)