diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 891458af11..b54de438a9 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -40,6 +40,7 @@
 #include 
 #include 
 #include 
+#include <sys/mman.h>
 #include 
 #include 
@@ -238,6 +239,9 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift)
 {
 	size_t obj_per_page, pg_num, pg_sz;
 
+	if (total_elt_sz == 0)
+		return 0;
+
 	if (pg_shift == 0)
 		return total_elt_sz * elt_num;
@@ -299,59 +303,18 @@ rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,
 	return (size_t)paddr_idx << pg_shift;
 }
 
-#ifndef RTE_LIBRTE_XEN_DOM0
-/* stub if DOM0 support not configured */
-struct rte_mempool *
-rte_dom0_mempool_create(const char *name __rte_unused,
-			unsigned n __rte_unused,
-			unsigned elt_size __rte_unused,
-			unsigned cache_size __rte_unused,
-			unsigned private_data_size __rte_unused,
-			rte_mempool_ctor_t *mp_init __rte_unused,
-			void *mp_init_arg __rte_unused,
-			rte_mempool_obj_ctor_t *obj_init __rte_unused,
-			void *obj_init_arg __rte_unused,
-			int socket_id __rte_unused,
-			unsigned flags __rte_unused)
-{
-	rte_errno = EINVAL;
-	return NULL;
-}
-#endif
-
-/* create the mempool */
-struct rte_mempool *
-rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
-	unsigned cache_size, unsigned private_data_size,
-	rte_mempool_ctor_t *mp_init, void *mp_init_arg,
-	rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
-	int socket_id, unsigned flags)
-{
-	if (rte_xen_dom0_supported())
-		return rte_dom0_mempool_create(name, n, elt_size,
-			cache_size, private_data_size,
-			mp_init, mp_init_arg,
-			obj_init, obj_init_arg,
-			socket_id, flags);
-	else
-		return rte_mempool_xmem_create(name, n, elt_size,
-			cache_size, private_data_size,
-			mp_init, mp_init_arg,
-			obj_init, obj_init_arg,
-			socket_id, flags,
-			NULL, NULL, MEMPOOL_PG_NUM_DEFAULT,
-			MEMPOOL_PG_SHIFT_MAX);
-}
-
 /* create the internal ring */
 static int
 rte_mempool_ring_create(struct rte_mempool *mp)
 {
-	int rg_flags = 0;
+	int rg_flags = 0, ret;
 	char rg_name[RTE_RING_NAMESIZE];
 	struct rte_ring *r;
 
-	snprintf(rg_name, sizeof(rg_name), RTE_MEMPOOL_MZ_FORMAT, mp->name);
+	ret = snprintf(rg_name, sizeof(rg_name),
+		RTE_MEMPOOL_MZ_FORMAT, mp->name);
+	if (ret < 0 || ret >= (int)sizeof(rg_name))
+		return -ENAMETOOLONG;
 
 	/* ring flags */
 	if (mp->flags & MEMPOOL_F_SP_PUT)
@@ -370,6 +333,7 @@ rte_mempool_ring_create(struct rte_mempool *mp)
 		return -rte_errno;
 
 	mp->ring = r;
+	mp->flags |= MEMPOOL_F_RING_CREATED;
 	return 0;
 }
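
The new total_elt_sz guard above makes rte_mempool_xmem_size() safe for callers that size their own backing memory. As a quick illustration of that helper — an editor's sketch against the API shown in this patch, with made-up sizes, not code from the patch itself:

	#include <stdio.h>
	#include <rte_mempool.h>

	/* editor's sketch: footprint of 4096 objects on 2 MB hugepages */
	static void
	print_pool_footprint(void)
	{
		size_t total_elt_sz = 2304;	/* header + element + trailer, illustrative */
		uint32_t elt_num = 4096;
		uint32_t pg_shift = 21;		/* log2 of the 2 MB page size */

		printf("backing memory needed: %zu bytes\n",
		       rte_mempool_xmem_size(elt_num, total_elt_sz, pg_shift));
	}
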
@@ -410,7 +374,7 @@ rte_mempool_free_memchunks(struct rte_mempool *mp)
  * zone. Return the number of objects added, or a negative value
  * on error.
  */
-static int
+int
 rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 	phys_addr_t paddr, size_t len,
 	rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
@@ -419,6 +383,14 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 	unsigned i = 0;
 	size_t off;
 	struct rte_mempool_memhdr *memhdr;
+	int ret;
+
+	/* create the internal ring if not already done */
+	if ((mp->flags & MEMPOOL_F_RING_CREATED) == 0) {
+		ret = rte_mempool_ring_create(mp);
+		if (ret < 0)
+			return ret;
+	}
 
 	/* mempool is already populated */
 	if (mp->populated_size >= mp->size)
@@ -444,7 +416,11 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 
 	while (off + total_elt_sz <= len && mp->populated_size < mp->size) {
 		off += mp->header_size;
-		mempool_add_elem(mp, (char *)vaddr + off, paddr + off);
+		if (paddr == RTE_BAD_PHYS_ADDR)
+			mempool_add_elem(mp, (char *)vaddr + off,
+				RTE_BAD_PHYS_ADDR);
+		else
+			mempool_add_elem(mp, (char *)vaddr + off, paddr + off);
 		off += mp->elt_size + mp->trailer_size;
 		i++;
 	}
@@ -461,7 +437,7 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 /* Add objects in the pool, using a table of physical pages. Return the
  * number of objects added, or a negative value on error.
  */
-static int
+int
 rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
 	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
 	rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
@@ -474,6 +450,10 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
 	if (mp->nb_mem_chunks != 0)
 		return -EEXIST;
 
+	if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
+		return rte_mempool_populate_phys(mp, vaddr, RTE_BAD_PHYS_ADDR,
+			pg_num * pg_sz, free_cb, opaque);
+
 	for (i = 0; i < pg_num && mp->populated_size < mp->size; i += n) {
 
 		/* populate with the largest group of contiguous pages */
@@ -497,7 +477,7 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
 /* Populate the mempool with a virtual area. Return the number of
  * objects added, or a negative value on error.
  */
-static int
+int
 rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 	size_t len, size_t pg_sz,
 	rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
@@ -515,10 +495,17 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 	if (RTE_ALIGN_CEIL(len, pg_sz) != len)
 		return -EINVAL;
 
+	if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
+		return rte_mempool_populate_phys(mp, addr, RTE_BAD_PHYS_ADDR,
+			len, free_cb, opaque);
+
 	for (off = 0; off + pg_sz <= len &&
 		mp->populated_size < mp->size; off += phys_len) {
 
 		paddr = rte_mem_virt2phy(addr + off);
+
+		/* required for xen_dom0 to get the machine address */
+		paddr = rte_mem_phy2mch(-1, paddr);
+
 		if (paddr == RTE_BAD_PHYS_ADDR) {
 			ret = -EINVAL;
 			goto fail;
 		}
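
Since rte_mempool_populate_virt() loses its static qualifier here, an application can feed a pool any page-aligned virtual area and let the library chase physical contiguity page by page. A hedged sketch of such a caller (editor's example; populate_from_heap() is a hypothetical helper, and a real caller would pass a free_cb to release the buffer):

	#include <errno.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <rte_common.h>
	#include <rte_mempool.h>

	/* hypothetical helper: back an empty pool with page-aligned heap memory */
	static int
	populate_from_heap(struct rte_mempool *mp, size_t len)
	{
		size_t pg_sz = getpagesize();
		void *addr;

		len = RTE_ALIGN_CEIL(len, pg_sz);	/* addr and len must be page-aligned */
		if (posix_memalign(&addr, pg_sz, len) != 0)
			return -ENOMEM;

		/* free_cb left NULL in this sketch, so the buffer is never reclaimed */
		return rte_mempool_populate_virt(mp, addr, len, pg_sz, NULL, NULL);
	}
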
@@ -555,13 +542,14 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
  * and populate them. Return the number of objects added, or a negative
  * value on error.
  */
-static int
+int
 rte_mempool_populate_default(struct rte_mempool *mp)
 {
 	int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	const struct rte_memzone *mz;
 	size_t size, total_elt_sz, align, pg_sz, pg_shift;
+	phys_addr_t paddr;
 	unsigned mz_id, n;
 	int ret;
 
@@ -601,9 +589,14 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 		goto fail;
 	}
 
-	if (rte_eal_has_hugepages())
+	if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
+		paddr = RTE_BAD_PHYS_ADDR;
+	else
+		paddr = mz->phys_addr;
+
+	if (rte_eal_has_hugepages() && !rte_xen_dom0_supported())
 		ret = rte_mempool_populate_phys(mp, mz->addr,
-			mz->phys_addr, mz->len,
+			paddr, mz->len,
 			rte_mempool_memchunk_mz_free,
 			(void *)(uintptr_t)mz);
 	else
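
The RTE_BAD_PHYS_ADDR plumbing above means a pool that is never handed to hardware can opt out of physical contiguity altogether via MEMPOOL_F_NO_PHYS_CONTIG. An editor's sketch, with illustrative sizes:

	#include <rte_mempool.h>

	/* no DMA from this pool, so physical contiguity is unnecessary */
	static struct rte_mempool *
	make_sw_pool(void)
	{
		return rte_mempool_create("sw_pool", 8192, 128, 32, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, MEMPOOL_F_NO_PHYS_CONTIG);
	}
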
@@ -622,20 +615,103 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	return ret;
 }
 
-/*
- * Create the mempool over already allocated chunk of memory.
- * That external memory buffer can consists of physically disjoint pages.
- * Setting vaddr to NULL, makes mempool to fallback to original behaviour
- * and allocate space for mempool and it's elements as one big chunk of
- * physically continuos memory.
- * */
+/* return the memory size required for mempool objects in anonymous mem */
+static size_t
+get_anon_size(const struct rte_mempool *mp)
+{
+	size_t size, total_elt_sz, pg_sz, pg_shift;
+
+	pg_sz = getpagesize();
+	pg_shift = rte_bsf32(pg_sz);
+	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+	size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift);
+
+	return size;
+}
+
+/* unmap a memory zone mapped by rte_mempool_populate_anon() */
+static void
+rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr,
+	void *opaque)
+{
+	munmap(opaque, get_anon_size(memhdr->mp));
+}
+
+/* populate the mempool with an anonymous mapping */
+int
+rte_mempool_populate_anon(struct rte_mempool *mp)
+{
+	size_t size;
+	int ret;
+	char *addr;
+
+	/* mempool is already populated, error */
+	if (!STAILQ_EMPTY(&mp->mem_list)) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	/* get chunk of virtually contiguous memory */
+	size = get_anon_size(mp);
+	addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+	if (addr == MAP_FAILED) {
+		rte_errno = errno;
+		return 0;
+	}
+	/* can't use MMAP_LOCKED, it does not exist on BSD */
+	if (mlock(addr, size) < 0) {
+		rte_errno = errno;
+		munmap(addr, size);
+		return 0;
+	}
+
+	ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(),
+		rte_mempool_memchunk_anon_free, addr);
+	if (ret == 0)
+		goto fail;
+
+	return mp->populated_size;
+
+ fail:
+	rte_mempool_free_memchunks(mp);
+	return 0;
+}
+
+/* free a mempool */
+void
+rte_mempool_free(struct rte_mempool *mp)
+{
+	struct rte_mempool_list *mempool_list = NULL;
+	struct rte_tailq_entry *te;
+
+	if (mp == NULL)
+		return;
+
+	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+	/* find the tailq entry */
+	TAILQ_FOREACH(te, mempool_list, next) {
+		if (te->data == (void *)mp)
+			break;
+	}
+
+	if (te != NULL) {
+		TAILQ_REMOVE(mempool_list, te, next);
+		rte_free(te);
+	}
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+	rte_mempool_free_memchunks(mp);
+	rte_ring_free(mp->ring);
+	rte_memzone_free(mp->mz);
+}
+
+/* create an empty mempool */
 struct rte_mempool *
-rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
-	unsigned cache_size, unsigned private_data_size,
-	rte_mempool_ctor_t *mp_init, void *mp_init_arg,
-	rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
-	int socket_id, unsigned flags, void *vaddr,
-	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
+	unsigned cache_size, unsigned private_data_size,
+	int socket_id, unsigned flags)
 {
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	struct rte_mempool_list *mempool_list;
@@ -668,18 +744,6 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 		return NULL;
 	}
 
-	/* check that we have both VA and PA */
-	if (vaddr != NULL && paddr == NULL) {
-		rte_errno = EINVAL;
-		return NULL;
-	}
-
-	/* Check that pg_num and pg_shift parameters are valid. */
-	if (pg_num == 0 || pg_shift > MEMPOOL_PG_SHIFT_MAX) {
-		rte_errno = EINVAL;
-		return NULL;
-	}
-
 	/* "no cache align" imply "no spread" */
 	if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
 		flags |= MEMPOOL_F_NO_SPREAD;
@@ -707,28 +771,33 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 		goto exit_unlock;
 	}
 
-	/*
-	 * If user provided an external memory buffer, then use it to
-	 * store mempool objects. Otherwise reserve a memzone that is large
-	 * enough to hold mempool header and metadata plus mempool objects.
-	 */
 	mempool_size = MEMPOOL_HEADER_SIZE(mp, cache_size);
 	mempool_size += private_data_size;
 	mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
 
-	snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
+	ret = snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
+	if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+		rte_errno = ENAMETOOLONG;
+		goto exit_unlock;
+	}
 
 	mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);
 	if (mz == NULL)
 		goto exit_unlock;
 
 	/* init the mempool structure */
+	mp = mz->addr;
 	memset(mp, 0, sizeof(*mp));
-	snprintf(mp->name, sizeof(mp->name), "%s", name);
+	ret = snprintf(mp->name, sizeof(mp->name), "%s", name);
+	if (ret < 0 || ret >= (int)sizeof(mp->name)) {
+		rte_errno = ENAMETOOLONG;
+		goto exit_unlock;
+	}
 	mp->mz = mz;
 	mp->socket_id = socket_id;
 	mp->size = n;
 	mp->flags = flags;
 	mp->elt_size = objsz.elt_size;
 	mp->header_size = objsz.header_size;
 	mp->trailer_size = objsz.trailer_size;
@@ -738,9 +807,6 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	STAILQ_INIT(&mp->elt_list);
 	STAILQ_INIT(&mp->mem_list);
 
-	if (rte_mempool_ring_create(mp) < 0)
-		goto exit_unlock;
-
 	/*
 	 * local_cache pointer is set even if cache_size is zero.
 	 * The local_cache points to just past the elt_pa[] array.
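
Together with rte_mempool_create_empty() introduced above, the new rte_mempool_populate_anon() and rte_mempool_free() turn pool construction into separate create and populate steps. An editor's sketch of that pairing (illustrative parameters, not part of the patch):

	#include <rte_mempool.h>

	static struct rte_mempool *
	make_anon_pool(void)
	{
		struct rte_mempool *mp;

		mp = rte_mempool_create_empty("anon_pool", 1024, 2048, 32, 0,
			SOCKET_ID_ANY, 0);
		if (mp == NULL)
			return NULL;

		/* populate_anon() returns 0 on error with rte_errno set */
		if (rte_mempool_populate_anon(mp) == 0) {
			rte_mempool_free(mp);
			return NULL;
		}
		return mp;
	}
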
@@ -748,47 +814,112 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	mp->local_cache = (struct rte_mempool_cache *)
 		RTE_PTR_ADD(mp, MEMPOOL_HEADER_SIZE(mp, 0));
 
-	/* call the initializer */
+	te->data = mp;
+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+	TAILQ_INSERT_TAIL(mempool_list, te, next);
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+	rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);
+
+	return mp;
+
+exit_unlock:
+	rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);
+	rte_free(te);
+	rte_mempool_free(mp);
+	return NULL;
+}
+
+/* create the mempool */
+struct rte_mempool *
+rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
+	unsigned cache_size, unsigned private_data_size,
+	rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+	rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
+	int socket_id, unsigned flags)
+{
+	struct rte_mempool *mp;
+
+	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
+		private_data_size, socket_id, flags);
+	if (mp == NULL)
+		return NULL;
+
+	/* call the mempool priv initializer */
 	if (mp_init)
 		mp_init(mp, mp_init_arg);
 
-	/* mempool elements allocated together with mempool */
-	if (vaddr == NULL)
-		ret = rte_mempool_populate_default(mp);
-	else
-		ret = rte_mempool_populate_phys_tab(mp, vaddr,
-			paddr, pg_num, pg_shift, NULL, NULL);
-	if (ret < 0) {
-		rte_errno = -ret;
-		goto exit_unlock;
-	} else if (ret != (int)mp->size) {
-		rte_errno = EINVAL;
-		goto exit_unlock;
-	}
+	if (rte_mempool_populate_default(mp) < 0)
+		goto fail;
 
-	/* call the initializer */
+	/* call the object initializers */
 	if (obj_init)
 		rte_mempool_obj_iter(mp, obj_init, obj_init_arg);
 
-	te->data = (void *) mp;
+	return mp;
 
-	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
-	TAILQ_INSERT_TAIL(mempool_list, te, next);
-	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-	rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);
+ fail:
+	rte_mempool_free(mp);
+	return NULL;
+}
 
-	return mp;
+/*
+ * Create the mempool over an already allocated chunk of memory.
+ * That external memory buffer can consist of physically disjoint pages.
+ * Setting vaddr to NULL makes the mempool fall back to the original
+ * behaviour and allocate space for the mempool and its elements as one
+ * big chunk of physically contiguous memory.
+ */
+struct rte_mempool *
+rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
+	unsigned cache_size, unsigned private_data_size,
+	rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+	rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
+	int socket_id, unsigned flags, void *vaddr,
+	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+{
+	struct rte_mempool *mp = NULL;
+	int ret;
 
-exit_unlock:
-	rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);
-	if (mp != NULL) {
-		rte_mempool_free_memchunks(mp);
-		rte_ring_free(mp->ring);
+	/* no virtual address supplied, use rte_mempool_create() */
+	if (vaddr == NULL)
+		return rte_mempool_create(name, n, elt_size, cache_size,
+			private_data_size, mp_init, mp_init_arg,
+			obj_init, obj_init_arg, socket_id, flags);
+
+	/* check that we have both VA and PA */
+	if (paddr == NULL) {
+		rte_errno = EINVAL;
+		return NULL;
 	}
-	rte_free(te);
-	if (mz != NULL)
-		rte_memzone_free(mz);
+	/* Check that pg_shift parameter is valid. */
+	if (pg_shift > MEMPOOL_PG_SHIFT_MAX) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
+		private_data_size, socket_id, flags);
+	if (mp == NULL)
+		return NULL;
+
+	/* call the mempool priv initializer */
+	if (mp_init)
+		mp_init(mp, mp_init_arg);
+
+	ret = rte_mempool_populate_phys_tab(mp, vaddr, paddr, pg_num, pg_shift,
+		NULL, NULL);
+	if (ret < 0 || ret != (int)mp->size)
+		goto fail;
+
+	/* call the object initializers */
+	if (obj_init)
+		rte_mempool_obj_iter(mp, obj_init, obj_init_arg);
+
+	return mp;
+
+ fail:
+	rte_mempool_free(mp);
 	return NULL;
 }
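
For completeness, the legacy entry point survives as a thin wrapper: callers that map their own memory still pass an explicit physical page table, and everything else now funnels through rte_mempool_create_empty(). An editor's sketch of driving it (pool_from_ext_mem() and its arguments are hypothetical; sizes illustrative):

	#include <unistd.h>
	#include <rte_common.h>
	#include <rte_mempool.h>

	/* hypothetical wrapper: build a pool over caller-mapped memory whose
	 * per-page physical addresses are already known */
	static struct rte_mempool *
	pool_from_ext_mem(void *vaddr, const phys_addr_t paddr[], uint32_t pg_num)
	{
		uint32_t pg_shift = rte_bsf32(getpagesize());

		return rte_mempool_xmem_create("ext_pool", 1024, 2048, 32, 0,
			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0,
			vaddr, paddr, pg_num, pg_shift);
	}
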