X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mempool%2Frte_mempool.c;h=0bde995b5290b7415288b0c84d9c94642e984322;hb=c0280d5d8ac047b9f56d21d17e2fd0961891072a;hp=88e49c75117e31fe4f4e1ca7ebe71168800dd1aa;hpb=354788b60cfd053b5a576734662bd8f70fe8e419;p=dpdk.git diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c index 88e49c7511..0bde995b52 100644 --- a/lib/librte_mempool/rte_mempool.c +++ b/lib/librte_mempool/rte_mempool.c @@ -31,8 +31,10 @@ #include #include #include +#include #include "rte_mempool.h" +#include "rte_mempool_trace.h" TAILQ_HEAD(rte_mempool_list, rte_tailq_entry); @@ -45,6 +47,7 @@ EAL_REGISTER_TAILQ(rte_mempool_tailq) #define CALC_CACHE_FLUSHTHRESH(c) \ ((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER)) +#if defined(RTE_ARCH_X86) /* * return the greatest common divisor between a and b (fast algorithm) * @@ -74,12 +77,13 @@ static unsigned get_gcd(unsigned a, unsigned b) } /* - * Depending on memory configuration, objects addresses are spread + * Depending on memory configuration on x86 arch, objects addresses are spread * between channels and ranks in RAM: the pool allocator will add * padding between objects. This function return the new size of the * object. */ -static unsigned optimize_object_size(unsigned obj_size) +static unsigned int +arch_mem_object_align(unsigned int obj_size) { unsigned nrank, nchan; unsigned new_obj_size; @@ -99,6 +103,13 @@ static unsigned optimize_object_size(unsigned obj_size) new_obj_size++; return new_obj_size * RTE_MEMPOOL_ALIGN; } +#else +static unsigned int +arch_mem_object_align(unsigned int obj_size) +{ + return obj_size; +} +#endif struct pagesz_walk_arg { int socket_id; @@ -234,8 +245,8 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, */ if ((flags & MEMPOOL_F_NO_SPREAD) == 0) { unsigned new_size; - new_size = optimize_object_size(sz->header_size + sz->elt_size + - sz->trailer_size); + new_size = arch_mem_object_align + (sz->header_size + sz->elt_size + sz->trailer_size); sz->trailer_size = new_size - sz->header_size - sz->elt_size; } @@ -293,12 +304,17 @@ mempool_ops_alloc_once(struct rte_mempool *mp) return 0; } +__vsym int +rte_mempool_populate_iova_v21(struct rte_mempool *mp, char *vaddr, + rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, + void *opaque); + /* Add objects in the pool, using a physically contiguous memory * zone. Return the number of objects added, or a negative value * on error. 
*/ -int -rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, +__vsym int +rte_mempool_populate_iova_v21(struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque) { @@ -329,10 +345,10 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN) off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr; else - off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr; + off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_MEMPOOL_ALIGN) - vaddr; if (off > len) { - ret = -EINVAL; + ret = 0; goto fail; } @@ -343,12 +359,14 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, /* not enough room to store one object */ if (i == 0) { - ret = -EINVAL; + ret = 0; goto fail; } STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next); mp->nb_mem_chunks++; + + rte_mempool_trace_populate_iova(mp, vaddr, iova, len, free_cb, opaque); return i; fail: @@ -356,11 +374,58 @@ fail: return ret; } +BIND_DEFAULT_SYMBOL(rte_mempool_populate_iova, _v21, 21); +MAP_STATIC_SYMBOL( + int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, + rte_iova_t iova, size_t len, + rte_mempool_memchunk_free_cb_t *free_cb, + void *opaque), + rte_mempool_populate_iova_v21); + +__vsym int +rte_mempool_populate_iova_v20(struct rte_mempool *mp, char *vaddr, + rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, + void *opaque); + +__vsym int +rte_mempool_populate_iova_v20(struct rte_mempool *mp, char *vaddr, + rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, + void *opaque) +{ + int ret; + + ret = rte_mempool_populate_iova_v21(mp, vaddr, iova, len, free_cb, + opaque); + if (ret == 0) + ret = -EINVAL; + + return ret; +} +VERSION_SYMBOL(rte_mempool_populate_iova, _v20, 20.0); + +static rte_iova_t +get_iova(void *addr) +{ + struct rte_memseg *ms; + + /* try registered memory first */ + ms = rte_mem_virt2memseg(addr, NULL); + if (ms == NULL || ms->iova == RTE_BAD_IOVA) + /* fall back to actual physical address */ + return rte_mem_virt2iova(addr); + return ms->iova + RTE_PTR_DIFF(addr, ms->addr); +} + +__vsym int +rte_mempool_populate_virt_v21(struct rte_mempool *mp, char *addr, + size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb, + void *opaque); + /* Populate the mempool with a virtual area. Return the number of * objects added, or a negative value on error. 
*/ -int -rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, +__vsym int +rte_mempool_populate_virt_v21(struct rte_mempool *mp, char *addr, size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque) { @@ -375,12 +440,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, for (off = 0; off < len && mp->populated_size < mp->size; off += phys_len) { - iova = rte_mem_virt2iova(addr + off); - - if (iova == RTE_BAD_IOVA && rte_eal_has_hugepages()) { - ret = -EINVAL; - goto fail; - } + iova = get_iova(addr + off); /* populate with the largest group of contiguous pages */ for (phys_len = RTE_MIN( @@ -391,15 +451,17 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, phys_len = RTE_MIN(phys_len + pg_sz, len - off)) { rte_iova_t iova_tmp; - iova_tmp = rte_mem_virt2iova(addr + off + phys_len); + iova_tmp = get_iova(addr + off + phys_len); if (iova_tmp == RTE_BAD_IOVA || iova_tmp != iova + phys_len) break; } - ret = rte_mempool_populate_iova(mp, addr + off, iova, + ret = rte_mempool_populate_iova_v21(mp, addr + off, iova, phys_len, free_cb, opaque); + if (ret == 0) + continue; if (ret < 0) goto fail; /* no need to call the free callback for next chunks */ @@ -407,12 +469,68 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, cnt += ret; } + rte_mempool_trace_populate_virt(mp, addr, len, pg_sz, free_cb, opaque); return cnt; fail: rte_mempool_free_memchunks(mp); return ret; } +BIND_DEFAULT_SYMBOL(rte_mempool_populate_virt, _v21, 21); +MAP_STATIC_SYMBOL( + int rte_mempool_populate_virt(struct rte_mempool *mp, + char *addr, size_t len, size_t pg_sz, + rte_mempool_memchunk_free_cb_t *free_cb, + void *opaque), + rte_mempool_populate_virt_v21); + +__vsym int +rte_mempool_populate_virt_v20(struct rte_mempool *mp, char *addr, + size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb, + void *opaque); + +__vsym int +rte_mempool_populate_virt_v20(struct rte_mempool *mp, char *addr, + size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb, + void *opaque) +{ + int ret; + + ret = rte_mempool_populate_virt_v21(mp, addr, len, pg_sz, + free_cb, opaque); + + if (ret == 0) + ret = -EINVAL; + + return ret; +} +VERSION_SYMBOL(rte_mempool_populate_virt, _v20, 20.0); + +/* Get the minimal page size used in a mempool before populating it. */ +int +rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz) +{ + bool need_iova_contig_obj; + bool alloc_in_ext_mem; + int ret; + + /* check if we can retrieve a valid socket ID */ + ret = rte_malloc_heap_socket_is_external(mp->socket_id); + if (ret < 0) + return -EINVAL; + alloc_in_ext_mem = (ret == 1); + need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG); + + if (!need_iova_contig_obj) + *pg_sz = 0; + else if (rte_eal_has_hugepages() || alloc_in_ext_mem) + *pg_sz = get_min_page_size(mp->socket_id); + else + *pg_sz = getpagesize(); + + rte_mempool_trace_get_page_size(mp, *pg_sz); + return 0; +} /* Default function to populate the mempool: allocate memory in memzones, * and populate them. 
Return the number of objects added, or a negative @@ -425,13 +543,12 @@ rte_mempool_populate_default(struct rte_mempool *mp) char mz_name[RTE_MEMZONE_NAMESIZE]; const struct rte_memzone *mz; ssize_t mem_size; - size_t align, pg_sz, pg_shift; + size_t align, pg_sz, pg_shift = 0; rte_iova_t iova; unsigned mz_id, n; int ret; bool need_iova_contig_obj; - bool try_iova_contig_mempool; - bool alloc_in_ext_mem; + size_t max_alloc_size = SIZE_MAX; ret = mempool_ops_alloc_once(mp); if (ret != 0) @@ -456,17 +573,15 @@ rte_mempool_populate_default(struct rte_mempool *mp) * then just set page shift and page size to 0, because the user has * indicated that there's no need to care about anything. * - * if we do need contiguous objects, there is also an option to reserve - * the entire mempool memory as one contiguous block of memory, in - * which case the page shift and alignment wouldn't matter as well. + * if we do need contiguous objects (if a mempool driver has its + * own calc_size() method returning min_chunk_size = mem_size), + * there is also an option to reserve the entire mempool memory + * as one contiguous block of memory. * * if we require contiguous objects, but not necessarily the entire - * mempool reserved space to be contiguous, then there are two options. - * - * if our IO addresses are virtual, not actual physical (IOVA as VA - * case), then no page shift needed - our memory allocation will give us - * contiguous IO memory as far as the hardware is concerned, so - * act as if we're getting contiguous memory. + * mempool reserved space to be contiguous, pg_sz will be != 0, + * and the default ops->populate() will take care of not placing + * objects across pages. * * if our IO addresses are physical, we may get memory from bigger * pages, or we might get memory from smaller pages, and how much of it @@ -477,53 +592,23 @@ rte_mempool_populate_default(struct rte_mempool *mp) * wasting some space this way, but it's much nicer than looping around * trying to reserve each and every page size. * - * However, since size calculation will produce page-aligned sizes, it - * makes sense to first try and see if we can reserve the entire memzone - * in one contiguous chunk as well (otherwise we might end up wasting a - * 1G page on a 10MB memzone). If we fail to get enough contiguous - * memory, then we'll go and reserve space page-by-page. - * - * We also have to take into account the fact that memory that we're - * going to allocate from can belong to an externally allocated memory - * area, in which case the assumption of IOVA as VA mode being - * synonymous with IOVA contiguousness will not hold. We should also try - * to go for contiguous memory even if we're in no-huge mode, because - * external memory may in fact be IOVA-contiguous. + * If we fail to get enough contiguous memory, then we'll go and + * reserve space in smaller chunks. 
*/ - /* check if we can retrieve a valid socket ID */ - ret = rte_malloc_heap_socket_is_external(mp->socket_id); - if (ret < 0) - return -EINVAL; - alloc_in_ext_mem = (ret == 1); need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG); - try_iova_contig_mempool = false; - - if (!need_iova_contig_obj) { - pg_sz = 0; - pg_shift = 0; - } else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA) { - pg_sz = 0; - pg_shift = 0; - } else if (rte_eal_has_hugepages() || alloc_in_ext_mem) { - try_iova_contig_mempool = true; - pg_sz = get_min_page_size(mp->socket_id); - pg_shift = rte_bsf32(pg_sz); - } else { - pg_sz = getpagesize(); + ret = rte_mempool_get_page_size(mp, &pg_sz); + if (ret < 0) + return ret; + + if (pg_sz != 0) pg_shift = rte_bsf32(pg_sz); - } for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) { size_t min_chunk_size; - unsigned int flags; - if (try_iova_contig_mempool || pg_sz == 0) - mem_size = rte_mempool_ops_calc_mem_size(mp, n, - 0, &min_chunk_size, &align); - else - mem_size = rte_mempool_ops_calc_mem_size(mp, n, - pg_shift, &min_chunk_size, &align); + mem_size = rte_mempool_ops_calc_mem_size( + mp, n, pg_shift, &min_chunk_size, &align); if (mem_size < 0) { ret = mem_size; @@ -537,63 +622,36 @@ rte_mempool_populate_default(struct rte_mempool *mp) goto fail; } - flags = mz_flags; - /* if we're trying to reserve contiguous memory, add appropriate * memzone flag. */ - if (try_iova_contig_mempool) - flags |= RTE_MEMZONE_IOVA_CONTIG; + if (min_chunk_size == (size_t)mem_size) + mz_flags |= RTE_MEMZONE_IOVA_CONTIG; - mz = rte_memzone_reserve_aligned(mz_name, mem_size, - mp->socket_id, flags, align); + /* Allocate a memzone, retrying with a smaller area on ENOMEM */ + do { + mz = rte_memzone_reserve_aligned(mz_name, + RTE_MIN((size_t)mem_size, max_alloc_size), + mp->socket_id, mz_flags, align); - /* if we were trying to allocate contiguous memory, failed and - * minimum required contiguous chunk fits minimum page, adjust - * memzone size to the page size, and try again. - */ - if (mz == NULL && try_iova_contig_mempool && - min_chunk_size <= pg_sz) { - try_iova_contig_mempool = false; - flags &= ~RTE_MEMZONE_IOVA_CONTIG; - - mem_size = rte_mempool_ops_calc_mem_size(mp, n, - pg_shift, &min_chunk_size, &align); - if (mem_size < 0) { - ret = mem_size; - goto fail; - } + if (mz == NULL && rte_errno != ENOMEM) + break; + + max_alloc_size = RTE_MIN(max_alloc_size, + (size_t)mem_size) / 2; + } while (mz == NULL && max_alloc_size >= min_chunk_size); - mz = rte_memzone_reserve_aligned(mz_name, mem_size, - mp->socket_id, flags, align); - } - /* don't try reserving with 0 size if we were asked to reserve - * IOVA-contiguous memory. 
- */ - if (min_chunk_size < (size_t)mem_size && mz == NULL) { - /* not enough memory, retry with the biggest zone we - * have - */ - mz = rte_memzone_reserve_aligned(mz_name, 0, - mp->socket_id, flags, align); - } if (mz == NULL) { ret = -rte_errno; goto fail; } - if (mz->len < min_chunk_size) { - rte_memzone_free(mz); - ret = -ENOMEM; - goto fail; - } - if (need_iova_contig_obj) iova = mz->iova; else iova = RTE_BAD_IOVA; - if (try_iova_contig_mempool || pg_sz == 0) + if (pg_sz == 0 || (mz_flags & RTE_MEMZONE_IOVA_CONTIG)) ret = rte_mempool_populate_iova(mp, mz->addr, iova, mz->len, rte_mempool_memchunk_mz_free, @@ -603,12 +661,15 @@ rte_mempool_populate_default(struct rte_mempool *mp) mz->len, pg_sz, rte_mempool_memchunk_mz_free, (void *)(uintptr_t)mz); + if (ret == 0) /* should not happen */ + ret = -ENOBUFS; if (ret < 0) { rte_memzone_free(mz); goto fail; } } + rte_mempool_trace_populate_default(mp); return mp->size; fail: @@ -667,8 +728,10 @@ rte_mempool_populate_anon(struct rte_mempool *mp) } ret = mempool_ops_alloc_once(mp); - if (ret != 0) - return ret; + if (ret < 0) { + rte_errno = -ret; + return 0; + } size = get_anon_size(mp); if (size < 0) { @@ -692,9 +755,14 @@ rte_mempool_populate_anon(struct rte_mempool *mp) ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(), rte_mempool_memchunk_anon_free, addr); - if (ret == 0) + if (ret == 0) /* should not happen */ + ret = -ENOBUFS; + if (ret < 0) { + rte_errno = -ret; goto fail; + } + rte_mempool_trace_populate_anon(mp); return mp->populated_size; fail: @@ -726,6 +794,7 @@ rte_mempool_free(struct rte_mempool *mp) } rte_mcfg_tailq_write_unlock(); + rte_mempool_trace_free(mp); rte_mempool_free_memchunks(mp); rte_mempool_ops_free(mp); rte_memzone_free(mp->mz); @@ -764,6 +833,7 @@ rte_mempool_cache_create(uint32_t size, int socket_id) mempool_cache_init(cache, size); + rte_mempool_trace_cache_create(size, socket_id, cache); return cache; } @@ -775,6 +845,7 @@ rte_mempool_cache_create(uint32_t size, int socket_id) void rte_mempool_cache_free(struct rte_mempool_cache *cache) { + rte_mempool_trace_cache_free(cache); rte_free(cache); } @@ -905,6 +976,8 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, rte_mcfg_tailq_write_unlock(); rte_mcfg_mempool_write_unlock(); + rte_mempool_trace_create_empty(name, n, elt_size, cache_size, + private_data_size, flags, mp); return mp; exit_unlock: @@ -957,6 +1030,9 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size, if (obj_init) rte_mempool_obj_iter(mp, obj_init, obj_init_arg); + rte_mempool_trace_create(name, n, elt_size, cache_size, + private_data_size, mp_init, mp_init_arg, obj_init, + obj_init_arg, flags, mp); return mp; fail:
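
Note on the populate changes above: rte_mempool_populate_iova() and rte_mempool_populate_virt() now return 0 (instead of -EINVAL) when the supplied area is too small to hold even one object; the old behaviour is preserved only behind the _v20 compatibility symbols. The sketch below is a hypothetical caller, not part of the patch: it feeds externally allocated, page-aligned chunks into a mempool created with rte_mempool_create_empty(), using the rte_mempool_get_page_size() helper added here (which may require experimental API to be enabled). The chunk arrays and the page-alignment assumption are illustrative only.

/*
 * Hypothetical helper (sketch): populate a mempool from caller-owned
 * chunks, relying on the new "return 0 when the chunk is too small"
 * convention instead of treating that case as a hard error.
 */
#include <errno.h>
#include <rte_mempool.h>

static int
populate_from_chunks(struct rte_mempool *mp, char **chunks, size_t *lens,
                unsigned int nb_chunks)
{
        size_t pg_sz;
        unsigned int i;
        int ret;

        /* page size the mempool expects for virtual-area population */
        ret = rte_mempool_get_page_size(mp, &pg_sz);
        if (ret < 0)
                return ret;

        for (i = 0; i < nb_chunks && mp->populated_size < mp->size; i++) {
                ret = rte_mempool_populate_virt(mp, chunks[i], lens[i],
                                pg_sz, NULL, NULL);
                if (ret == 0)
                        continue;       /* chunk too small, nothing added */
                if (ret < 0)
                        return ret;     /* real error */
        }

        return mp->populated_size == mp->size ? 0 : -ENOMEM;
}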
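
For the arch_mem_object_align() change near the top of the diff, the effect of channel/rank spreading (now applied on x86 only) can be observed from application code through the public rte_mempool_calc_obj_size() helper. The snippet below is illustrative only; the element size is an arbitrary example value, and on non-x86 builds both results are expected to be identical after this change.

/*
 * Illustrative sketch: compare the per-object footprint with and
 * without channel/rank spreading (MEMPOOL_F_NO_SPREAD).
 */
#include <stdint.h>
#include <stdio.h>
#include <rte_mempool.h>

static void
show_obj_footprint(uint32_t elt_size)
{
        struct rte_mempool_objsz spread, no_spread;

        rte_mempool_calc_obj_size(elt_size, 0, &spread);
        rte_mempool_calc_obj_size(elt_size, MEMPOOL_F_NO_SPREAD, &no_spread);

        printf("elt %u: total %u bytes with spreading, %u without\n",
                        elt_size, spread.total_size, no_spread.total_size);
}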
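
The rte_mempool_populate_default() rework replaces the old fallback (retry with a zero-sized, i.e. largest-available, memzone) with a loop that halves the request on ENOMEM until it would drop below min_chunk_size. Below is a standalone sketch of that reservation strategy; the function name and parameters are placeholders, not part of the patch.

/*
 * Sketch of the "halve on ENOMEM" strategy: try the full size first and
 * shrink the request until it could no longer satisfy min_chunk_size.
 */
#include <errno.h>
#include <stdint.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_memzone.h>

static const struct rte_memzone *
reserve_with_fallback(const char *name, size_t mem_size,
                size_t min_chunk_size, int socket_id,
                unsigned int flags, unsigned int align)
{
        const struct rte_memzone *mz = NULL;
        size_t max_alloc_size = SIZE_MAX;

        do {
                mz = rte_memzone_reserve_aligned(name,
                                RTE_MIN(mem_size, max_alloc_size),
                                socket_id, flags, align);
                if (mz == NULL && rte_errno != ENOMEM)
                        break;          /* any other error: give up */
                max_alloc_size = RTE_MIN(max_alloc_size, mem_size) / 2;
        } while (mz == NULL && max_alloc_size >= min_chunk_size);

        return mz;
}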