X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mempool%2Frte_mempool.c;h=0f29e87128e1a83783ae3d32a22e31415a03a6ee;hb=a2b5a8722f20265896cb1ac026c716239f6313bf;hp=fe90ed3f3989a32aafebc67f7fe6545f4caa6342;hpb=57ba4f5f95630c8dd1c6e46e25d1f16f43253714;p=dpdk.git diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c index fe90ed3f39..0f29e87128 100644 --- a/lib/librte_mempool/rte_mempool.c +++ b/lib/librte_mempool/rte_mempool.c @@ -1,36 +1,9 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation. + * Copyright(c) 2016 6WIND S.A. */ +#include #include #include #include @@ -39,6 +12,7 @@ #include #include #include +#include #include #include @@ -53,10 +27,10 @@ #include #include #include -#include #include #include #include +#include #include "rte_mempool.h" @@ -126,134 +100,100 @@ static unsigned optimize_object_size(unsigned obj_size) return new_obj_size * RTE_MEMPOOL_ALIGN; } +struct pagesz_walk_arg { + int socket_id; + size_t min; +}; + +static int +find_min_pagesz(const struct rte_memseg_list *msl, void *arg) +{ + struct pagesz_walk_arg *wa = arg; + bool valid; + + /* + * we need to only look at page sizes available for a particular socket + * ID. so, we either need an exact match on socket ID (can match both + * native and external memory), or, if SOCKET_ID_ANY was specified as a + * socket ID argument, we must only look at native memory and ignore any + * page sizes associated with external memory. 
+ */ + valid = msl->socket_id == wa->socket_id; + valid |= wa->socket_id == SOCKET_ID_ANY && msl->external == 0; + + if (valid && msl->page_sz < wa->min) + wa->min = msl->page_sz; + + return 0; +} + +static size_t +get_min_page_size(int socket_id) +{ + struct pagesz_walk_arg wa; + + wa.min = SIZE_MAX; + wa.socket_id = socket_id; + + rte_memseg_list_walk(find_min_pagesz, &wa); + + return wa.min == SIZE_MAX ? (size_t) getpagesize() : wa.min; +} + + static void -mempool_add_elem(struct rte_mempool *mp, void *obj, uint32_t obj_idx, - rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg) +mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque, + void *obj, rte_iova_t iova) { struct rte_mempool_objhdr *hdr; struct rte_mempool_objtlr *tlr __rte_unused; - obj = (char *)obj + mp->header_size; - /* set mempool ptr in header */ hdr = RTE_PTR_SUB(obj, sizeof(*hdr)); hdr->mp = mp; + hdr->iova = iova; + STAILQ_INSERT_TAIL(&mp->elt_list, hdr, next); + mp->populated_size++; #ifdef RTE_LIBRTE_MEMPOOL_DEBUG hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2; tlr = __mempool_get_trailer(obj); tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE; #endif - /* call the initializer */ - if (obj_init) - obj_init(mp, obj_init_arg, obj, obj_idx); - - /* enqueue in ring */ - rte_ring_sp_enqueue(mp->ring, obj); } -/* Iterate through objects at the given address - * - * Given the pointer to the memory, and its topology in physical memory - * (the physical addresses table), iterate through the "elt_num" objects - * of size "elt_sz" aligned at "align". For each object in this memory - * chunk, invoke a callback. It returns the effective number of objects - * in this memory. - */ +/* call obj_cb() for each mempool element */ uint32_t -rte_mempool_obj_iter(void *vaddr, uint32_t elt_num, size_t total_elt_sz, - size_t align, const phys_addr_t paddr[], uint32_t pg_num, - uint32_t pg_shift, rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg) +rte_mempool_obj_iter(struct rte_mempool *mp, + rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg) { - uint32_t i, j, k; - uint32_t pgn, pgf; - uintptr_t end, start, va; - uintptr_t pg_sz; - - pg_sz = (uintptr_t)1 << pg_shift; - va = (uintptr_t)vaddr; - - i = 0; - j = 0; - - while (i != elt_num && j != pg_num) { - - start = RTE_ALIGN_CEIL(va, align); - end = start + total_elt_sz; - - /* index of the first page for the next element. */ - pgf = (end >> pg_shift) - (start >> pg_shift); - - /* index of the last page for the current element. */ - pgn = ((end - 1) >> pg_shift) - (start >> pg_shift); - pgn += j; - - /* do we have enough space left for the element. */ - if (pgn >= pg_num) - break; - - for (k = j; - k != pgn && - paddr[k] + pg_sz == paddr[k + 1]; - k++) - ; + struct rte_mempool_objhdr *hdr; + void *obj; + unsigned n = 0; - /* - * if next pgn chunks of memory physically continuous, - * use it to create next element. - * otherwise, just skip that chunk unused. - */ - if (k == pgn) { - if (obj_iter != NULL) - obj_iter(obj_iter_arg, (void *)start, - (void *)end, i); - va = end; - j += pgf; - i++; - } else { - va = RTE_ALIGN_CEIL((va + 1), pg_sz); - j++; - } + STAILQ_FOREACH(hdr, &mp->elt_list, next) { + obj = (char *)hdr + sizeof(*hdr); + obj_cb(mp, obj_cb_arg, obj, n); + n++; } - return i; + return n; } -/* - * Populate mempool with the objects. 
- */ - -struct mempool_populate_arg { - struct rte_mempool *mp; - rte_mempool_obj_ctor_t *obj_init; - void *obj_init_arg; -}; - -static void -mempool_obj_populate(void *arg, void *start, void *end, uint32_t idx) -{ - struct mempool_populate_arg *pa = arg; - - mempool_add_elem(pa->mp, start, idx, pa->obj_init, pa->obj_init_arg); - pa->mp->elt_va_end = (uintptr_t)end; -} - -static void -mempool_populate(struct rte_mempool *mp, size_t num, size_t align, - rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg) +/* call mem_cb() for each mempool memory chunk */ +uint32_t +rte_mempool_mem_iter(struct rte_mempool *mp, + rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg) { - uint32_t elt_sz; - struct mempool_populate_arg arg; + struct rte_mempool_memhdr *hdr; + unsigned n = 0; - elt_sz = mp->elt_size + mp->header_size + mp->trailer_size; - arg.mp = mp; - arg.obj_init = obj_init; - arg.obj_init_arg = obj_init_arg; + STAILQ_FOREACH(hdr, &mp->mem_list, next) { + mem_cb(mp, mem_cb_arg, hdr, n); + n++; + } - mp->size = rte_mempool_obj_iter((void *)mp->elt_va_start, - num, elt_sz, align, - mp->elt_pa, mp->pg_num, mp->pg_shift, - mempool_obj_populate, &arg); + return n; } /* get the header, trailer and total size of a mempool element. */ @@ -265,24 +205,17 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, sz = (sz != NULL) ? sz : &lsz; - /* - * In header, we have at least the pointer to the pool, and - * optionaly a 64 bits cookie. - */ - sz->header_size = 0; - sz->header_size += sizeof(struct rte_mempool *); /* ptr to pool */ -#ifdef RTE_LIBRTE_MEMPOOL_DEBUG - sz->header_size += sizeof(uint64_t); /* cookie */ -#endif + sz->header_size = sizeof(struct rte_mempool_objhdr); if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) sz->header_size = RTE_ALIGN_CEIL(sz->header_size, RTE_MEMPOOL_ALIGN); - /* trailer contains the cookie in debug mode */ - sz->trailer_size = 0; #ifdef RTE_LIBRTE_MEMPOOL_DEBUG - sz->trailer_size += sizeof(uint64_t); /* cookie */ + sz->trailer_size = sizeof(struct rte_mempool_objtlr); +#else + sz->trailer_size = 0; #endif + /* element size is 8 bytes-aligned at least */ sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t)); @@ -306,160 +239,562 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, sz->trailer_size = new_size - sz->header_size - sz->elt_size; } - if (! rte_eal_has_hugepages()) { - /* - * compute trailer size so that pool elements fit exactly in - * a standard page - */ - int page_size = getpagesize(); - int new_size = page_size - sz->header_size - sz->elt_size; - if (new_size < 0 || (unsigned int)new_size < sz->trailer_size) { - printf("When hugepages are disabled, pool objects " - "can't exceed PAGE_SIZE: %d + %d + %d > %d\n", - sz->header_size, sz->elt_size, sz->trailer_size, - page_size); - return 0; - } - sz->trailer_size = new_size; - } - /* this is the size of an object, including header and trailer */ sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size; return sz->total_size; } +/* free a memchunk allocated with rte_memzone_reserve() */ +static void +rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr, + void *opaque) +{ + const struct rte_memzone *mz = opaque; + rte_memzone_free(mz); +} -/* - * Calculate maximum amount of memory required to store given number of objects. +/* Free memory chunks used by a mempool. 
Objects must be in pool */ +static void +rte_mempool_free_memchunks(struct rte_mempool *mp) +{ + struct rte_mempool_memhdr *memhdr; + void *elt; + + while (!STAILQ_EMPTY(&mp->elt_list)) { + rte_mempool_ops_dequeue_bulk(mp, &elt, 1); + (void)elt; + STAILQ_REMOVE_HEAD(&mp->elt_list, next); + mp->populated_size--; + } + + while (!STAILQ_EMPTY(&mp->mem_list)) { + memhdr = STAILQ_FIRST(&mp->mem_list); + STAILQ_REMOVE_HEAD(&mp->mem_list, next); + if (memhdr->free_cb != NULL) + memhdr->free_cb(memhdr, memhdr->opaque); + rte_free(memhdr); + mp->nb_mem_chunks--; + } +} + +static int +mempool_ops_alloc_once(struct rte_mempool *mp) +{ + int ret; + + /* create the internal ring if not already done */ + if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) { + ret = rte_mempool_ops_alloc(mp); + if (ret != 0) + return ret; + mp->flags |= MEMPOOL_F_POOL_CREATED; + } + return 0; +} + +/* Add objects in the pool, using a physically contiguous memory + * zone. Return the number of objects added, or a negative value + * on error. */ -size_t -rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift) +int +rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, + rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, + void *opaque) { - size_t n, pg_num, pg_sz, sz; + unsigned i = 0; + size_t off; + struct rte_mempool_memhdr *memhdr; + int ret; + + ret = mempool_ops_alloc_once(mp); + if (ret != 0) + return ret; + + /* mempool is already populated */ + if (mp->populated_size >= mp->size) + return -ENOSPC; + + memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0); + if (memhdr == NULL) + return -ENOMEM; + + memhdr->mp = mp; + memhdr->addr = vaddr; + memhdr->iova = iova; + memhdr->len = len; + memhdr->free_cb = free_cb; + memhdr->opaque = opaque; + + if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN) + off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr; + else + off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr; - pg_sz = (size_t)1 << pg_shift; + if (off > len) { + ret = -EINVAL; + goto fail; + } - if ((n = pg_sz / total_elt_sz) > 0) { - pg_num = (elt_num + n - 1) / n; - sz = pg_num << pg_shift; - } else { - sz = RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * elt_num; + i = rte_mempool_ops_populate(mp, mp->size - mp->populated_size, + (char *)vaddr + off, + (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off), + len - off, mempool_add_elem, NULL); + + /* not enough room to store one object */ + if (i == 0) { + ret = -EINVAL; + goto fail; } - return sz; + STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next); + mp->nb_mem_chunks++; + return i; + +fail: + rte_free(memhdr); + return ret; } -/* Callback used by rte_mempool_xmem_usage(): it sets the opaque - * argument to the end of the object. +/* Populate the mempool with a virtual area. Return the number of + * objects added, or a negative value on error. 
*/ -static void -mempool_lelem_iter(void *arg, __rte_unused void *start, void *end, - __rte_unused uint32_t idx) +int +rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, + size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb, + void *opaque) { - *(uintptr_t *)arg = (uintptr_t)end; + rte_iova_t iova; + size_t off, phys_len; + int ret, cnt = 0; + + /* address and len must be page-aligned */ + if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr) + return -EINVAL; + if (RTE_ALIGN_CEIL(len, pg_sz) != len) + return -EINVAL; + + if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) + return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA, + len, free_cb, opaque); + + for (off = 0; off + pg_sz <= len && + mp->populated_size < mp->size; off += phys_len) { + + iova = rte_mem_virt2iova(addr + off); + + if (iova == RTE_BAD_IOVA && rte_eal_has_hugepages()) { + ret = -EINVAL; + goto fail; + } + + /* populate with the largest group of contiguous pages */ + for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) { + rte_iova_t iova_tmp; + + iova_tmp = rte_mem_virt2iova(addr + off + phys_len); + + if (iova_tmp != iova + phys_len) + break; + } + + ret = rte_mempool_populate_iova(mp, addr + off, iova, + phys_len, free_cb, opaque); + if (ret < 0) + goto fail; + /* no need to call the free callback for next chunks */ + free_cb = NULL; + cnt += ret; + } + + return cnt; + + fail: + rte_mempool_free_memchunks(mp); + return ret; } -/* - * Calculate how much memory would be actually required with the - * given memory footprint to store required number of elements. +/* Default function to populate the mempool: allocate memory in memzones, + * and populate them. Return the number of objects added, or a negative + * value on error. */ -ssize_t -rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t total_elt_sz, - const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift) +int +rte_mempool_populate_default(struct rte_mempool *mp) { - uint32_t n; - uintptr_t va, uv; - size_t pg_sz, usz; + unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY; + char mz_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + ssize_t mem_size; + size_t align, pg_sz, pg_shift; + rte_iova_t iova; + unsigned mz_id, n; + int ret; + bool need_iova_contig_obj; + bool try_iova_contig_mempool; + bool alloc_in_ext_mem; + + ret = mempool_ops_alloc_once(mp); + if (ret != 0) + return ret; + + /* mempool must not be populated */ + if (mp->nb_mem_chunks != 0) + return -EEXIST; + + /* + * the following section calculates page shift and page size values. + * + * these values impact the result of calc_mem_size operation, which + * returns the amount of memory that should be allocated to store the + * desired number of objects. when not zero, it allocates more memory + * for the padding between objects, to ensure that an object does not + * cross a page boundary. in other words, page size/shift are to be set + * to zero if mempool elements won't care about page boundaries. + * there are several considerations for page size and page shift here. + * + * if we don't need our mempools to have physically contiguous objects, + * then just set page shift and page size to 0, because the user has + * indicated that there's no need to care about anything. + * + * if we do need contiguous objects, there is also an option to reserve + * the entire mempool memory as one contiguous block of memory, in + * which case the page shift and alignment wouldn't matter as well. 
+ * + * if we require contiguous objects, but not necessarily the entire + * mempool reserved space to be contiguous, then there are two options. + * + * if our IO addresses are virtual, not actual physical (IOVA as VA + * case), then no page shift needed - our memory allocation will give us + * contiguous IO memory as far as the hardware is concerned, so + * act as if we're getting contiguous memory. + * + * if our IO addresses are physical, we may get memory from bigger + * pages, or we might get memory from smaller pages, and how much of it + * we require depends on whether we want bigger or smaller pages. + * However, requesting each and every memory size is too much work, so + * what we'll do instead is walk through the page sizes available, pick + * the smallest one and set up page shift to match that one. We will be + * wasting some space this way, but it's much nicer than looping around + * trying to reserve each and every page size. + * + * However, since size calculation will produce page-aligned sizes, it + * makes sense to first try and see if we can reserve the entire memzone + * in one contiguous chunk as well (otherwise we might end up wasting a + * 1G page on a 10MB memzone). If we fail to get enough contiguous + * memory, then we'll go and reserve space page-by-page. + * + * We also have to take into account the fact that memory that we're + * going to allocate from can belong to an externally allocated memory + * area, in which case the assumption of IOVA as VA mode being + * synonymous with IOVA contiguousness will not hold. We should also try + * to go for contiguous memory even if we're in no-huge mode, because + * external memory may in fact be IOVA-contiguous. + */ + + /* check if we can retrieve a valid socket ID */ + ret = rte_malloc_heap_socket_is_external(mp->socket_id); + if (ret < 0) + return -EINVAL; + alloc_in_ext_mem = (ret == 1); + need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG); + try_iova_contig_mempool = false; + + if (!need_iova_contig_obj) { + pg_sz = 0; + pg_shift = 0; + } else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA) { + pg_sz = 0; + pg_shift = 0; + } else if (rte_eal_has_hugepages() || alloc_in_ext_mem) { + try_iova_contig_mempool = true; + pg_sz = get_min_page_size(mp->socket_id); + pg_shift = rte_bsf32(pg_sz); + } else { + pg_sz = getpagesize(); + pg_shift = rte_bsf32(pg_sz); + } + + for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) { + size_t min_chunk_size; + unsigned int flags; + + if (try_iova_contig_mempool || pg_sz == 0) + mem_size = rte_mempool_ops_calc_mem_size(mp, n, + 0, &min_chunk_size, &align); + else + mem_size = rte_mempool_ops_calc_mem_size(mp, n, + pg_shift, &min_chunk_size, &align); + + if (mem_size < 0) { + ret = mem_size; + goto fail; + } - pg_sz = (size_t)1 << pg_shift; - va = (uintptr_t)vaddr; - uv = va; + ret = snprintf(mz_name, sizeof(mz_name), + RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id); + if (ret < 0 || ret >= (int)sizeof(mz_name)) { + ret = -ENAMETOOLONG; + goto fail; + } + + flags = mz_flags; + + /* if we're trying to reserve contiguous memory, add appropriate + * memzone flag. 
+ */ + if (try_iova_contig_mempool) + flags |= RTE_MEMZONE_IOVA_CONTIG; + + mz = rte_memzone_reserve_aligned(mz_name, mem_size, + mp->socket_id, flags, align); - if ((n = rte_mempool_obj_iter(vaddr, elt_num, total_elt_sz, 1, - paddr, pg_num, pg_shift, mempool_lelem_iter, - &uv)) != elt_num) { - return -(ssize_t)n; + /* if we were trying to allocate contiguous memory, failed and + * minimum required contiguous chunk fits minimum page, adjust + * memzone size to the page size, and try again. + */ + if (mz == NULL && try_iova_contig_mempool && + min_chunk_size <= pg_sz) { + try_iova_contig_mempool = false; + flags &= ~RTE_MEMZONE_IOVA_CONTIG; + + mem_size = rte_mempool_ops_calc_mem_size(mp, n, + pg_shift, &min_chunk_size, &align); + if (mem_size < 0) { + ret = mem_size; + goto fail; + } + + mz = rte_memzone_reserve_aligned(mz_name, mem_size, + mp->socket_id, flags, align); + } + /* don't try reserving with 0 size if we were asked to reserve + * IOVA-contiguous memory. + */ + if (min_chunk_size < (size_t)mem_size && mz == NULL) { + /* not enough memory, retry with the biggest zone we + * have + */ + mz = rte_memzone_reserve_aligned(mz_name, 0, + mp->socket_id, flags, + RTE_MAX(pg_sz, align)); + } + if (mz == NULL) { + ret = -rte_errno; + goto fail; + } + + if (mz->len < min_chunk_size) { + rte_memzone_free(mz); + ret = -ENOMEM; + goto fail; + } + + if (need_iova_contig_obj) + iova = mz->iova; + else + iova = RTE_BAD_IOVA; + + if (try_iova_contig_mempool || pg_sz == 0) + ret = rte_mempool_populate_iova(mp, mz->addr, + iova, mz->len, + rte_mempool_memchunk_mz_free, + (void *)(uintptr_t)mz); + else + ret = rte_mempool_populate_virt(mp, mz->addr, + RTE_ALIGN_FLOOR(mz->len, pg_sz), pg_sz, + rte_mempool_memchunk_mz_free, + (void *)(uintptr_t)mz); + if (ret < 0) { + rte_memzone_free(mz); + goto fail; + } } - uv = RTE_ALIGN_CEIL(uv, pg_sz); - usz = uv - va; - return usz; + return mp->size; + + fail: + rte_mempool_free_memchunks(mp); + return ret; } -#ifndef RTE_LIBRTE_XEN_DOM0 -/* stub if DOM0 support not configured */ -struct rte_mempool * -rte_dom0_mempool_create(const char *name __rte_unused, - unsigned n __rte_unused, - unsigned elt_size __rte_unused, - unsigned cache_size __rte_unused, - unsigned private_data_size __rte_unused, - rte_mempool_ctor_t *mp_init __rte_unused, - void *mp_init_arg __rte_unused, - rte_mempool_obj_ctor_t *obj_init __rte_unused, - void *obj_init_arg __rte_unused, - int socket_id __rte_unused, - unsigned flags __rte_unused) -{ - rte_errno = EINVAL; - return NULL; +/* return the memory size required for mempool objects in anonymous mem */ +static ssize_t +get_anon_size(const struct rte_mempool *mp) +{ + ssize_t size; + size_t pg_sz, pg_shift; + size_t min_chunk_size; + size_t align; + + pg_sz = getpagesize(); + pg_shift = rte_bsf32(pg_sz); + size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift, + &min_chunk_size, &align); + + return size; } -#endif -/* create the mempool */ -struct rte_mempool * -rte_mempool_create(const char *name, unsigned n, unsigned elt_size, - unsigned cache_size, unsigned private_data_size, - rte_mempool_ctor_t *mp_init, void *mp_init_arg, - rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, - int socket_id, unsigned flags) -{ - if (rte_xen_dom0_supported()) - return rte_dom0_mempool_create(name, n, elt_size, - cache_size, private_data_size, - mp_init, mp_init_arg, - obj_init, obj_init_arg, - socket_id, flags); - else - return rte_mempool_xmem_create(name, n, elt_size, - cache_size, private_data_size, - mp_init, mp_init_arg, - obj_init, 
obj_init_arg, - socket_id, flags, - NULL, NULL, MEMPOOL_PG_NUM_DEFAULT, - MEMPOOL_PG_SHIFT_MAX); +/* unmap a memory zone mapped by rte_mempool_populate_anon() */ +static void +rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr, + void *opaque) +{ + ssize_t size; + + /* + * Calculate size since memhdr->len has contiguous chunk length + * which may be smaller if anon map is split into many contiguous + * chunks. Result must be the same as we calculated on populate. + */ + size = get_anon_size(memhdr->mp); + if (size < 0) + return; + + munmap(opaque, size); +} + +/* populate the mempool with an anonymous mapping */ +int +rte_mempool_populate_anon(struct rte_mempool *mp) +{ + ssize_t size; + int ret; + char *addr; + + /* mempool is already populated, error */ + if ((!STAILQ_EMPTY(&mp->mem_list)) || mp->nb_mem_chunks != 0) { + rte_errno = EINVAL; + return 0; + } + + ret = mempool_ops_alloc_once(mp); + if (ret != 0) + return ret; + + size = get_anon_size(mp); + if (size < 0) { + rte_errno = -size; + return 0; + } + + /* get chunk of virtually continuous memory */ + addr = mmap(NULL, size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + if (addr == MAP_FAILED) { + rte_errno = errno; + return 0; + } + /* can't use MMAP_LOCKED, it does not exist on BSD */ + if (mlock(addr, size) < 0) { + rte_errno = errno; + munmap(addr, size); + return 0; + } + + ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(), + rte_mempool_memchunk_anon_free, addr); + if (ret == 0) + goto fail; + + return mp->populated_size; + + fail: + rte_mempool_free_memchunks(mp); + return 0; +} + +/* free a mempool */ +void +rte_mempool_free(struct rte_mempool *mp) +{ + struct rte_mempool_list *mempool_list = NULL; + struct rte_tailq_entry *te; + + if (mp == NULL) + return; + + mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list); + rte_mcfg_tailq_write_lock(); + /* find out tailq entry */ + TAILQ_FOREACH(te, mempool_list, next) { + if (te->data == (void *)mp) + break; + } + + if (te != NULL) { + TAILQ_REMOVE(mempool_list, te, next); + rte_free(te); + } + rte_mcfg_tailq_write_unlock(); + + rte_mempool_free_memchunks(mp); + rte_mempool_ops_free(mp); + rte_memzone_free(mp->mz); +} + +static void +mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size) +{ + cache->size = size; + cache->flushthresh = CALC_CACHE_FLUSHTHRESH(size); + cache->len = 0; +} + +/* + * Create and initialize a cache for objects that are retrieved from and + * returned to an underlying mempool. This structure is identical to the + * local_cache[lcore_id] pointed to by the mempool structure. + */ +struct rte_mempool_cache * +rte_mempool_cache_create(uint32_t size, int socket_id) +{ + struct rte_mempool_cache *cache; + + if (size == 0 || size > RTE_MEMPOOL_CACHE_MAX_SIZE) { + rte_errno = EINVAL; + return NULL; + } + + cache = rte_zmalloc_socket("MEMPOOL_CACHE", sizeof(*cache), + RTE_CACHE_LINE_SIZE, socket_id); + if (cache == NULL) { + RTE_LOG(ERR, MEMPOOL, "Cannot allocate mempool cache.\n"); + rte_errno = ENOMEM; + return NULL; + } + + mempool_cache_init(cache, size); + + return cache; } /* - * Create the mempool over already allocated chunk of memory. - * That external memory buffer can consists of physically disjoint pages. - * Setting vaddr to NULL, makes mempool to fallback to original behaviour - * and allocate space for mempool and it's elements as one big chunk of - * physically continuos memory. - * */ + * Free a cache. 
It's the responsibility of the user to make sure that any + * remaining objects in the cache are flushed to the corresponding + * mempool. + */ +void +rte_mempool_cache_free(struct rte_mempool_cache *cache) +{ + rte_free(cache); +} + +/* create an empty mempool */ struct rte_mempool * -rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, - unsigned cache_size, unsigned private_data_size, - rte_mempool_ctor_t *mp_init, void *mp_init_arg, - rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, - int socket_id, unsigned flags, void *vaddr, - const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift) +rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + int socket_id, unsigned flags) { char mz_name[RTE_MEMZONE_NAMESIZE]; - char rg_name[RTE_RING_NAMESIZE]; struct rte_mempool_list *mempool_list; struct rte_mempool *mp = NULL; struct rte_tailq_entry *te = NULL; - struct rte_ring *r = NULL; - const struct rte_memzone *mz; + const struct rte_memzone *mz = NULL; size_t mempool_size; - int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY; - int rg_flags = 0; - void *obj; + unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY; struct rte_mempool_objsz objsz; - void *startaddr; - int page_size = getpagesize(); + unsigned lcore_id; + int ret; /* compilation-time checks */ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) & @@ -475,21 +810,15 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list); - /* asked cache too big */ - if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE || - CALC_CACHE_FLUSHTHRESH(cache_size) > n) { - rte_errno = EINVAL; - return NULL; - } - - /* check that we have both VA and PA */ - if (vaddr != NULL && paddr == NULL) { + /* asked for zero items */ + if (n == 0) { rte_errno = EINVAL; return NULL; } - /* Check that pg_num and pg_shift parameters are valid. */ - if (pg_num < RTE_DIM(mp->elt_pa) || pg_shift > MEMPOOL_PG_SHIFT_MAX) { + /* asked cache too big */ + if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE || + CALC_CACHE_FLUSHTHRESH(cache_size) > n) { rte_errno = EINVAL; return NULL; } @@ -498,28 +827,13 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, if (flags & MEMPOOL_F_NO_CACHE_ALIGN) flags |= MEMPOOL_F_NO_SPREAD; - /* ring flags */ - if (flags & MEMPOOL_F_SP_PUT) - rg_flags |= RING_F_SP_ENQ; - if (flags & MEMPOOL_F_SC_GET) - rg_flags |= RING_F_SC_DEQ; - /* calculate mempool object sizes. */ if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) { rte_errno = EINVAL; return NULL; } - rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK); - - /* allocate the ring that will be used to store objects */ - /* Ring functions will return appropriate errors if we are - * running as a secondary process etc., so no checks made - * in this function for that condition */ - snprintf(rg_name, sizeof(rg_name), RTE_MEMPOOL_MZ_FORMAT, name); - r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags); - if (r == NULL) - goto exit_unlock; + rte_mcfg_mempool_write_lock(); /* * reserve a memory zone for this mempool: private data is @@ -528,16 +842,6 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, private_data_size = (private_data_size + RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK); - if (! 
rte_eal_has_hugepages()) { - /* - * expand private data size to a whole page, so that the - * first pool element will start on a new standard page - */ - int head = sizeof(struct rte_mempool); - int new_size = (private_data_size + head) % page_size; - if (new_size) - private_data_size += page_size - new_size; - } /* try to allocate tailq entry */ te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0); @@ -546,120 +850,129 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, goto exit_unlock; } - /* - * If user provided an external memory buffer, then use it to - * store mempool objects. Otherwise reserve a memzone that is large - * enough to hold mempool header and metadata plus mempool objects. - */ - mempool_size = MEMPOOL_HEADER_SIZE(mp, pg_num, cache_size); + mempool_size = MEMPOOL_HEADER_SIZE(mp, cache_size); mempool_size += private_data_size; mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN); - if (vaddr == NULL) - mempool_size += (size_t)objsz.total_size * n; - - if (! rte_eal_has_hugepages()) { - /* - * we want the memory pool to start on a page boundary, - * because pool elements crossing page boundaries would - * result in discontiguous physical addresses - */ - mempool_size += page_size; - } - snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name); + ret = snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name); + if (ret < 0 || ret >= (int)sizeof(mz_name)) { + rte_errno = ENAMETOOLONG; + goto exit_unlock; + } mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags); if (mz == NULL) goto exit_unlock; - if (rte_eal_has_hugepages()) { - startaddr = (void*)mz->addr; - } else { - /* align memory pool start address on a page boundary */ - unsigned long addr = (unsigned long)mz->addr; - if (addr & (page_size - 1)) { - addr += page_size; - addr &= ~(page_size - 1); - } - startaddr = (void*)addr; - } - /* init the mempool structure */ - mp = startaddr; - memset(mp, 0, sizeof(*mp)); - snprintf(mp->name, sizeof(mp->name), "%s", name); - mp->phys_addr = mz->phys_addr; - mp->ring = r; + mp = mz->addr; + memset(mp, 0, MEMPOOL_HEADER_SIZE(mp, cache_size)); + ret = strlcpy(mp->name, name, sizeof(mp->name)); + if (ret < 0 || ret >= (int)sizeof(mp->name)) { + rte_errno = ENAMETOOLONG; + goto exit_unlock; + } + mp->mz = mz; mp->size = n; mp->flags = flags; + mp->socket_id = socket_id; mp->elt_size = objsz.elt_size; mp->header_size = objsz.header_size; mp->trailer_size = objsz.trailer_size; + /* Size of default caches, zero means disabled. */ mp->cache_size = cache_size; - mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size); mp->private_data_size = private_data_size; + STAILQ_INIT(&mp->elt_list); + STAILQ_INIT(&mp->mem_list); /* * local_cache pointer is set even if cache_size is zero. * The local_cache points to just past the elt_pa[] array. */ mp->local_cache = (struct rte_mempool_cache *) - RTE_PTR_ADD(mp, MEMPOOL_HEADER_SIZE(mp, pg_num, 0)); - - /* calculate address of the first element for continuous mempool. */ - obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num, cache_size) + - private_data_size; - obj = RTE_PTR_ALIGN_CEIL(obj, RTE_MEMPOOL_ALIGN); - - /* populate address translation fields. 
*/ - mp->pg_num = pg_num; - mp->pg_shift = pg_shift; - mp->pg_mask = RTE_LEN2MASK(mp->pg_shift, typeof(mp->pg_mask)); - - /* mempool elements allocated together with mempool */ - if (vaddr == NULL) { - mp->elt_va_start = (uintptr_t)obj; - mp->elt_pa[0] = mp->phys_addr + - (mp->elt_va_start - (uintptr_t)mp); - } else { - /* mempool elements in a separate chunk of memory. */ - mp->elt_va_start = (uintptr_t)vaddr; - memcpy(mp->elt_pa, paddr, sizeof (mp->elt_pa[0]) * pg_num); - } - - mp->elt_va_end = mp->elt_va_start; - - /* call the initializer */ - if (mp_init) - mp_init(mp, mp_init_arg); + RTE_PTR_ADD(mp, MEMPOOL_HEADER_SIZE(mp, 0)); - mempool_populate(mp, n, 1, obj_init, obj_init_arg); + /* Init all default caches. */ + if (cache_size != 0) { + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) + mempool_cache_init(&mp->local_cache[lcore_id], + cache_size); + } - te->data = (void *) mp; + te->data = mp; - rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_lock(); TAILQ_INSERT_TAIL(mempool_list, te, next); - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); - rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK); + rte_mcfg_tailq_write_unlock(); + rte_mcfg_mempool_write_unlock(); return mp; exit_unlock: - rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK); - rte_ring_free(r); + rte_mcfg_mempool_write_unlock(); rte_free(te); + rte_mempool_free(mp); + return NULL; +} + +/* create the mempool */ +struct rte_mempool * +rte_mempool_create(const char *name, unsigned n, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + rte_mempool_ctor_t *mp_init, void *mp_init_arg, + rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags) +{ + int ret; + struct rte_mempool *mp; + + mp = rte_mempool_create_empty(name, n, elt_size, cache_size, + private_data_size, socket_id, flags); + if (mp == NULL) + return NULL; + + /* + * Since we have 4 combinations of the SP/SC/MP/MC examine the flags to + * set the correct index into the table of ops structs. 
+ */ + if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET)) + ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL); + else if (flags & MEMPOOL_F_SP_PUT) + ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL); + else if (flags & MEMPOOL_F_SC_GET) + ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL); + else + ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL); + + if (ret) + goto fail; + + /* call the mempool priv initializer */ + if (mp_init) + mp_init(mp, mp_init_arg); + + if (rte_mempool_populate_default(mp) < 0) + goto fail; + /* call the object initializers */ + if (obj_init) + rte_mempool_obj_iter(mp, obj_init, obj_init_arg); + + return mp; + + fail: + rte_mempool_free(mp); return NULL; } /* Return the number of entries in the mempool */ -unsigned -rte_mempool_count(const struct rte_mempool *mp) +unsigned int +rte_mempool_avail_count(const struct rte_mempool *mp) { unsigned count; unsigned lcore_id; - count = rte_ring_count(mp->ring); + count = rte_mempool_ops_get_count(mp); if (mp->cache_size == 0) return count; @@ -676,6 +989,13 @@ rte_mempool_count(const struct rte_mempool *mp) return count; } +/* return the number of entries allocated from the mempool */ +unsigned int +rte_mempool_in_use_count(const struct rte_mempool *mp) +{ + return mp->size - rte_mempool_avail_count(mp); +} + /* dump the cache status */ static unsigned rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp) @@ -684,7 +1004,7 @@ rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp) unsigned count = 0; unsigned cache_count; - fprintf(f, " cache infos:\n"); + fprintf(f, " internal cache infos:\n"); fprintf(f, " cache_size=%"PRIu32"\n", mp->cache_size); if (mp->cache_size == 0) @@ -692,74 +1012,144 @@ rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp) for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { cache_count = mp->local_cache[lcore_id].len; - fprintf(f, " cache_count[%u]=%u\n", lcore_id, cache_count); + fprintf(f, " cache_count[%u]=%"PRIu32"\n", + lcore_id, cache_count); count += cache_count; } fprintf(f, " total_cache_count=%u\n", count); return count; } -#ifdef RTE_LIBRTE_MEMPOOL_DEBUG -/* check cookies before and after objects */ #ifndef __INTEL_COMPILER #pragma GCC diagnostic ignored "-Wcast-qual" #endif -struct mempool_audit_arg { - const struct rte_mempool *mp; - uintptr_t obj_end; - uint32_t obj_num; -}; - -static void -mempool_obj_audit(void *arg, void *start, void *end, uint32_t idx) +/* check and update cookies or panic (internal) */ +void rte_mempool_check_cookies(const struct rte_mempool *mp, + void * const *obj_table_const, unsigned n, int free) { - struct mempool_audit_arg *pa = arg; +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + struct rte_mempool_objhdr *hdr; + struct rte_mempool_objtlr *tlr; + uint64_t cookie; + void *tmp; void *obj; - - obj = (char *)start + pa->mp->header_size; - pa->obj_end = (uintptr_t)end; - pa->obj_num = idx + 1; - __mempool_check_cookies(pa->mp, &obj, 1, 2); + void **obj_table; + + /* Force to drop the "const" attribute. 
This is done only when + * DEBUG is enabled */ + tmp = (void *) obj_table_const; + obj_table = tmp; + + while (n--) { + obj = obj_table[n]; + + if (rte_mempool_from_obj(obj) != mp) + rte_panic("MEMPOOL: object is owned by another " + "mempool\n"); + + hdr = __mempool_get_header(obj); + cookie = hdr->cookie; + + if (free == 0) { + if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) { + RTE_LOG(CRIT, MEMPOOL, + "obj=%p, mempool=%p, cookie=%" PRIx64 "\n", + obj, (const void *) mp, cookie); + rte_panic("MEMPOOL: bad header cookie (put)\n"); + } + hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2; + } else if (free == 1) { + if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) { + RTE_LOG(CRIT, MEMPOOL, + "obj=%p, mempool=%p, cookie=%" PRIx64 "\n", + obj, (const void *) mp, cookie); + rte_panic("MEMPOOL: bad header cookie (get)\n"); + } + hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE1; + } else if (free == 2) { + if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 && + cookie != RTE_MEMPOOL_HEADER_COOKIE2) { + RTE_LOG(CRIT, MEMPOOL, + "obj=%p, mempool=%p, cookie=%" PRIx64 "\n", + obj, (const void *) mp, cookie); + rte_panic("MEMPOOL: bad header cookie (audit)\n"); + } + } + tlr = __mempool_get_trailer(obj); + cookie = tlr->cookie; + if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) { + RTE_LOG(CRIT, MEMPOOL, + "obj=%p, mempool=%p, cookie=%" PRIx64 "\n", + obj, (const void *) mp, cookie); + rte_panic("MEMPOOL: bad trailer cookie\n"); + } + } +#else + RTE_SET_USED(mp); + RTE_SET_USED(obj_table_const); + RTE_SET_USED(n); + RTE_SET_USED(free); +#endif } -static void -mempool_audit_cookies(const struct rte_mempool *mp) +void +rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp, + void * const *first_obj_table_const, unsigned int n, int free) { - uint32_t elt_sz, num; - struct mempool_audit_arg arg; +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + struct rte_mempool_info info; + const size_t total_elt_sz = + mp->header_size + mp->elt_size + mp->trailer_size; + unsigned int i, j; + + rte_mempool_ops_get_info(mp, &info); + + for (i = 0; i < n; ++i) { + void *first_obj = first_obj_table_const[i]; + + for (j = 0; j < info.contig_block_size; ++j) { + void *obj; - elt_sz = mp->elt_size + mp->header_size + mp->trailer_size; + obj = (void *)((uintptr_t)first_obj + j * total_elt_sz); + rte_mempool_check_cookies(mp, &obj, 1, free); + } + } +#else + RTE_SET_USED(mp); + RTE_SET_USED(first_obj_table_const); + RTE_SET_USED(n); + RTE_SET_USED(free); +#endif +} - arg.mp = mp; - arg.obj_end = mp->elt_va_start; - arg.obj_num = 0; +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG +static void +mempool_obj_audit(struct rte_mempool *mp, __rte_unused void *opaque, + void *obj, __rte_unused unsigned idx) +{ + __mempool_check_cookies(mp, &obj, 1, 2); +} - num = rte_mempool_obj_iter((void *)mp->elt_va_start, - mp->size, elt_sz, 1, - mp->elt_pa, mp->pg_num, mp->pg_shift, - mempool_obj_audit, &arg); +static void +mempool_audit_cookies(struct rte_mempool *mp) +{ + unsigned num; + num = rte_mempool_obj_iter(mp, mempool_obj_audit, NULL); if (num != mp->size) { - rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) " + rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) " "iterated only over %u elements\n", mp, mp->size, num); - } else if (arg.obj_end != mp->elt_va_end || arg.obj_num != mp->size) { - rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) " - "last callback va_end: %#tx (%#tx expeceted), " - "num of objects: %u (%u expected)\n", - mp, mp->size, - arg.obj_end, mp->elt_va_end, - arg.obj_num, mp->size); } } +#else +#define mempool_audit_cookies(mp) do {} while(0) +#endif #ifndef 
__INTEL_COMPILER #pragma GCC diagnostic error "-Wcast-qual" #endif -#else -#define mempool_audit_cookies(mp) do {} while(0) -#endif /* check cookies before and after objects */ static void @@ -772,7 +1162,9 @@ mempool_audit_cache(const struct rte_mempool *mp) return; for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { - if (mp->local_cache[lcore_id].len > mp->cache_flushthresh) { + const struct rte_mempool_cache *cache; + cache = &mp->local_cache[lcore_id]; + if (cache->len > cache->flushthresh) { RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n", lcore_id); rte_panic("MEMPOOL: invalid cache len\n"); @@ -782,7 +1174,7 @@ mempool_audit_cache(const struct rte_mempool *mp) /* check the consistency of mempool (size, cookies, ...) */ void -rte_mempool_audit(const struct rte_mempool *mp) +rte_mempool_audit(struct rte_mempool *mp) { mempool_audit_cache(mp); mempool_audit_cookies(mp); @@ -793,23 +1185,28 @@ rte_mempool_audit(const struct rte_mempool *mp) /* dump the status of the mempool on the console */ void -rte_mempool_dump(FILE *f, const struct rte_mempool *mp) +rte_mempool_dump(FILE *f, struct rte_mempool *mp) { #ifdef RTE_LIBRTE_MEMPOOL_DEBUG + struct rte_mempool_info info; struct rte_mempool_debug_stats sum; unsigned lcore_id; #endif + struct rte_mempool_memhdr *memhdr; unsigned common_count; unsigned cache_count; + size_t mem_len = 0; RTE_ASSERT(f != NULL); RTE_ASSERT(mp != NULL); fprintf(f, "mempool <%s>@%p\n", mp->name, mp); fprintf(f, " flags=%x\n", mp->flags); - fprintf(f, " ring=<%s>@%p\n", mp->ring->name, mp->ring); - fprintf(f, " phys_addr=0x%" PRIx64 "\n", mp->phys_addr); + fprintf(f, " pool=%p\n", mp->pool_data); + fprintf(f, " iova=0x%" PRIx64 "\n", mp->mz->iova); + fprintf(f, " nb_mem_chunks=%u\n", mp->nb_mem_chunks); fprintf(f, " size=%"PRIu32"\n", mp->size); + fprintf(f, " populated_size=%"PRIu32"\n", mp->populated_size); fprintf(f, " header_size=%"PRIu32"\n", mp->header_size); fprintf(f, " elt_size=%"PRIu32"\n", mp->elt_size); fprintf(f, " trailer_size=%"PRIu32"\n", mp->trailer_size); @@ -817,26 +1214,23 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp) mp->header_size + mp->elt_size + mp->trailer_size); fprintf(f, " private_data_size=%"PRIu32"\n", mp->private_data_size); - fprintf(f, " pg_num=%"PRIu32"\n", mp->pg_num); - fprintf(f, " pg_shift=%"PRIu32"\n", mp->pg_shift); - fprintf(f, " pg_mask=%#tx\n", mp->pg_mask); - fprintf(f, " elt_va_start=%#tx\n", mp->elt_va_start); - fprintf(f, " elt_va_end=%#tx\n", mp->elt_va_end); - fprintf(f, " elt_pa[0]=0x%" PRIx64 "\n", mp->elt_pa[0]); - - if (mp->size != 0) + + STAILQ_FOREACH(memhdr, &mp->mem_list, next) + mem_len += memhdr->len; + if (mem_len != 0) { fprintf(f, " avg bytes/object=%#Lf\n", - (long double)(mp->elt_va_end - mp->elt_va_start) / - mp->size); + (long double)mem_len / mp->size); + } cache_count = rte_mempool_dump_cache(f, mp); - common_count = rte_ring_count(mp->ring); + common_count = rte_mempool_ops_get_count(mp); if ((cache_count + common_count) > mp->size) common_count = mp->size - cache_count; fprintf(f, " common_pool_count=%u\n", common_count); /* sum and dump statistics */ #ifdef RTE_LIBRTE_MEMPOOL_DEBUG + rte_mempool_ops_get_info(mp, &info); memset(&sum, 0, sizeof(sum)); for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { sum.put_bulk += mp->stats[lcore_id].put_bulk; @@ -845,6 +1239,8 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp) sum.get_success_objs += mp->stats[lcore_id].get_success_objs; sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk; sum.get_fail_objs += 
mp->stats[lcore_id].get_fail_objs; + sum.get_success_blks += mp->stats[lcore_id].get_success_blks; + sum.get_fail_blks += mp->stats[lcore_id].get_fail_blks; } fprintf(f, " stats:\n"); fprintf(f, " put_bulk=%"PRIu64"\n", sum.put_bulk); @@ -853,6 +1249,11 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp) fprintf(f, " get_success_objs=%"PRIu64"\n", sum.get_success_objs); fprintf(f, " get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk); fprintf(f, " get_fail_objs=%"PRIu64"\n", sum.get_fail_objs); + if (info.contig_block_size > 0) { + fprintf(f, " get_success_blks=%"PRIu64"\n", + sum.get_success_blks); + fprintf(f, " get_fail_blks=%"PRIu64"\n", sum.get_fail_blks); + } #else fprintf(f, " no statistics available\n"); #endif @@ -864,20 +1265,20 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp) void rte_mempool_list_dump(FILE *f) { - const struct rte_mempool *mp = NULL; + struct rte_mempool *mp = NULL; struct rte_tailq_entry *te; struct rte_mempool_list *mempool_list; mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list); - rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK); + rte_mcfg_mempool_read_lock(); TAILQ_FOREACH(te, mempool_list, next) { mp = (struct rte_mempool *) te->data; rte_mempool_dump(f, mp); } - rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK); + rte_mcfg_mempool_read_unlock(); } /* search a mempool from its name */ @@ -890,7 +1291,7 @@ rte_mempool_lookup(const char *name) mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list); - rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK); + rte_mcfg_mempool_read_lock(); TAILQ_FOREACH(te, mempool_list, next) { mp = (struct rte_mempool *) te->data; @@ -898,7 +1299,7 @@ rte_mempool_lookup(const char *name) break; } - rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK); + rte_mcfg_mempool_read_unlock(); if (te == NULL) { rte_errno = ENOENT; @@ -908,19 +1309,20 @@ rte_mempool_lookup(const char *name) return mp; } -void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *), +void rte_mempool_walk(void (*func)(struct rte_mempool *, void *), void *arg) { struct rte_tailq_entry *te = NULL; struct rte_mempool_list *mempool_list; + void *tmp_te; mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list); - rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK); + rte_mcfg_mempool_read_lock(); - TAILQ_FOREACH(te, mempool_list, next) { + TAILQ_FOREACH_SAFE(te, mempool_list, next, tmp_te) { (*func)((struct rte_mempool *) te->data, arg); } - rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK); + rte_mcfg_mempool_read_unlock(); }
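
For reference, the refactored rte_mempool_create() in this change is now a thin wrapper around the split API it introduces: rte_mempool_create_empty(), rte_mempool_set_ops_byname(), rte_mempool_populate_default() and rte_mempool_obj_iter(). The sketch below shows how an application might drive those steps directly. It is a minimal illustration, not part of this commit: the pool name, element size and counts are made up, and it assumes the standard "ring_mp_mc" mempool driver (the default chosen by rte_mempool_create() above) is linked in.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_eal.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_mempool.h>

/* Per-object constructor, same role as obj_init in rte_mempool_create(). */
static void
obj_init_cb(struct rte_mempool *mp, __rte_unused void *arg,
		void *obj, __rte_unused unsigned obj_idx)
{
	memset(obj, 0, mp->elt_size);
}

int
main(int argc, char **argv)
{
	struct rte_mempool *mp;

	if (rte_eal_init(argc, argv) < 0)
		rte_exit(EXIT_FAILURE, "cannot init EAL\n");

	/* 1. Allocate an empty mempool: header, caches and tailq entry only,
	 * no object memory yet. */
	mp = rte_mempool_create_empty("example_pool", 8192, 2048,
			256, 0, SOCKET_ID_ANY, 0);
	if (mp == NULL)
		rte_exit(EXIT_FAILURE, "create_empty: %s\n",
				rte_strerror(rte_errno));

	/* 2. Select the backing driver; the multi-producer/multi-consumer
	 * ring is what rte_mempool_create() picks when no SP/SC flag is set. */
	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0)
		rte_exit(EXIT_FAILURE, "cannot set mempool ops\n");

	/* 3. Reserve memory in memzones and add the objects to the pool. */
	if (rte_mempool_populate_default(mp) < 0)
		rte_exit(EXIT_FAILURE, "populate: %s\n",
				rte_strerror(rte_errno));

	/* 4. Run the per-object constructor over every populated element. */
	rte_mempool_obj_iter(mp, obj_init_cb, NULL);

	printf("pool %s: %u objects available\n",
			mp->name, rte_mempool_avail_count(mp));

	rte_mempool_free(mp);
	return 0;
}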