From: Damien Millescamps
Date: Fri, 1 Mar 2013 15:10:57 +0000 (+0100)
Subject: mem: fix mempool for --no-huge
X-Git-Tag: spdx-start~10946
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=1896b4ec5e7ad5089fa17120bebf17d5dea8f476;p=dpdk.git

mem: fix mempool for --no-huge

In --no-huge mode, mempool provides objects with their associated
header/trailer fitting in a standard page (usually 4KB). This means all
non-UIO drivers should work correctly in this mode, while UIO drivers
will not, since they allocate ring sizes that cannot fit in a page.

Extend rte_mempool_virt2phy to obtain the correct physical address when
elements of the pool are not on the same physically contiguous memory
region.

The reason for this patch is to be able to run on kernels < 2.6.37
without the need to patch them, since all kernels below that version
either have buggy huge page support or lack it entirely (< 2.6.28).

Signed-off-by: Damien Millescamps
Acked-by: Adrien Mazarguil
---

diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 98477f99de..296f17272c 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -989,7 +989,7 @@ rte_eal_hugepage_init(void)
 	/* get pointer to global configuration */
 	mcfg = rte_eal_get_configuration()->mem_config;
 
-	/* for debug purposes, hugetlbfs can be disabled */
+	/* hugetlbfs can be disabled */
 	if (internal_config.no_hugetlbfs) {
 		addr = malloc(internal_config.memory);
 		mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 1132440793..fdc1586b9e 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -35,6 +35,7 @@
 #include <stdio.h>
 #include <string.h>
 #include <stdint.h>
+#include <unistd.h>
 #include <stdarg.h>
 #include <errno.h>
 #include <inttypes.h>
@@ -284,6 +285,23 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 		sz->trailer_size = new_size - sz->header_size - sz->elt_size;
 	}
 
+	if (! rte_eal_has_hugepages()) {
+		/*
+		 * compute trailer size so that pool elements fit exactly in
+		 * a standard page
+		 */
+		int page_size = getpagesize();
+		int new_size = page_size - sz->header_size - sz->elt_size;
+		if (new_size < 0 || (unsigned int)new_size < sz->trailer_size) {
+			printf("When hugepages are disabled, pool objects "
+			       "can't exceed PAGE_SIZE: %d + %d + %d > %d\n",
+			       sz->header_size, sz->elt_size, sz->trailer_size,
+			       page_size);
+			return 0;
+		}
+		sz->trailer_size = new_size;
+	}
+
 	/* this is the size of an object, including header and trailer */
 	sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
 
@@ -392,8 +410,10 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	size_t mempool_size;
 	int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
 	int rg_flags = 0;
-	void *obj;
+	void *obj;
 	struct rte_mempool_objsz objsz;
+	void *startaddr;
+	int page_size = getpagesize();
 
 	/* compilation-time checks */
 	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
@@ -447,7 +467,10 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 		rg_flags |= RING_F_SC_DEQ;
 
 	/* calculate mempool object sizes. */
-	rte_mempool_calc_obj_size(elt_size, flags, &objsz);
+	if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
 
 	rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK);
 
@@ -467,6 +490,18 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	private_data_size = (private_data_size +
 			     CACHE_LINE_MASK) & (~CACHE_LINE_MASK);
 
+	if (! rte_eal_has_hugepages()) {
+		/*
+		 * expand private data size to a whole page, so that the
+		 * first pool element will start on a new standard page
+		 */
+		int head = sizeof(struct rte_mempool);
+		int new_size = (private_data_size + head) % page_size;
+		if (new_size) {
+			private_data_size += page_size - new_size;
+		}
+	}
+
 	/*
 	 * If user provided an external memory buffer, then use it to
 	 * store mempool objects. Otherwise reserve memzone big enough to
@@ -476,6 +511,15 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	if (vaddr == NULL)
 		mempool_size += (size_t)objsz.total_size * n;
 
+	if (! rte_eal_has_hugepages()) {
+		/*
+		 * we want the memory pool to start on a page boundary,
+		 * because pool elements crossing page boundaries would
+		 * result in discontiguous physical addresses
+		 */
+		mempool_size += page_size;
+	}
+
 	rte_snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
 
 	mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);
@@ -487,8 +531,20 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	if (mz == NULL)
 		goto exit;
 
+	if (rte_eal_has_hugepages()) {
+		startaddr = (void*)mz->addr;
+	} else {
+		/* align memory pool start address on a page boundary */
+		unsigned long addr = (unsigned long)mz->addr;
+		if (addr & (page_size - 1)) {
+			addr += page_size;
+			addr &= ~(page_size - 1);
+		}
+		startaddr = (void*)addr;
+	}
+
 	/* init the mempool structure */
-	mp = mz->addr;
+	mp = startaddr;
 	memset(mp, 0, sizeof(*mp));
 	rte_snprintf(mp->name, sizeof(mp->name), "%s", name);
 	mp->phys_addr = mz->phys_addr;
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index ae13f8b0d0..7668671c60 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1255,13 +1255,21 @@ rte_mempool_empty(const struct rte_mempool *mp)
 static inline phys_addr_t
 rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
 {
-	uintptr_t off;
-
-	off = (const char *)elt - (const char *)mp->elt_va_start;
-	return (mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask));
+	if (rte_eal_has_hugepages()) {
+		uintptr_t off;
+
+		off = (const char *)elt - (const char *)mp->elt_va_start;
+		return (mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask));
+	} else {
+		/*
+		 * If huge pages are disabled, we cannot assume the
+		 * memory region to be physically contiguous.
+		 * Lookup for each element.
+		 */
+		return rte_mem_virt2phy(elt);
+	}
 }
 
-
 /**
  * Check the consistency of mempool objects.
  *
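
A quick way to sanity-check the sizing rule added to rte_mempool_calc_obj_size
is to reproduce the same arithmetic standalone. The struct and helper names
below are illustrative only, not DPDK API; with 4KB pages, a 64-byte header
and a 2048-byte element, the trailer is padded to 1984 bytes so each object
fills exactly one page:

    /* Standalone sketch of the --no-huge sizing rule (names are made up). */
    #include <stdio.h>
    #include <unistd.h>

    struct objsz {
            unsigned int header_size;
            unsigned int elt_size;
            unsigned int trailer_size;
            unsigned int total_size;
    };

    /* Pad the trailer so header + element + trailer fill exactly one page;
     * return 0 when the object cannot fit, mirroring the patched behavior. */
    static unsigned int pad_obj_to_page(struct objsz *sz)
    {
            long page_size = sysconf(_SC_PAGESIZE); /* what getpagesize() returns */
            long new_size = page_size - (long)sz->header_size - (long)sz->elt_size;

            if (new_size < 0 || (unsigned long)new_size < sz->trailer_size)
                    return 0;
            sz->trailer_size = (unsigned int)new_size;
            sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
            return sz->total_size;
    }

    int main(void)
    {
            struct objsz sz = { .header_size = 64, .elt_size = 2048, .trailer_size = 0 };

            if (pad_obj_to_page(&sz) == 0) {
                    fprintf(stderr, "object does not fit in one page\n");
                    return 1;
            }
            /* with 4KB pages: total 4096 = 64 + 2048 + 1984 */
            printf("total %u = %u + %u + %u\n", sz.total_size,
                   sz.header_size, sz.elt_size, sz.trailer_size);
            return 0;
    }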
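
The start-address fixup in rte_mempool_xmem_create is the usual
round-up-to-a-power-of-two-boundary idiom; the extra page added to
mempool_size beforehand guarantees the rounded-up address still leaves room
for the whole pool. A self-contained illustration (the helper name is
invented for the example):

    #include <assert.h>
    #include <stdint.h>

    /* Round addr up to the next multiple of page_size (a power of two),
     * exactly as the patch does before placing the mempool structure. */
    static uintptr_t page_align_up(uintptr_t addr, uintptr_t page_size)
    {
            if (addr & (page_size - 1)) {
                    addr += page_size;
                    addr &= ~(page_size - 1);
            }
            return addr;
    }

    int main(void)
    {
            /* 0x10010 is 16 bytes past a 4KB boundary and rounds up to 0x11000 */
            assert(page_align_up(0x10010, 0x1000) == 0x11000);
            /* an already-aligned address is returned unchanged */
            assert(page_align_up(0x10000, 0x1000) == 0x10000);
            return 0;
    }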
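
Finally, the rte_mempool_virt2phy fallback relies on rte_mem_virt2phy to
resolve each element's physical address individually, since malloc'd memory
gives no contiguity guarantee across page boundaries. On Linux this kind of
per-page lookup goes through /proc/self/pagemap. The sketch below shows that
mechanism in isolation, under the assumption that this is representative of
what rte_mem_virt2phy does; it is not the DPDK implementation, and reading
page frame numbers requires root privileges:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Translate a virtual address to a physical one via /proc/self/pagemap:
     * one 8-byte entry per virtual page, PFN in bits 0-54, "present" in bit 63. */
    static uint64_t virt2phys(const void *virt)
    {
            long page_size = sysconf(_SC_PAGESIZE);
            uint64_t entry;
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd < 0)
                    return 0;
            off_t off = ((uintptr_t)virt / page_size) * sizeof(entry);
            if (pread(fd, &entry, sizeof(entry), off) != (ssize_t)sizeof(entry)) {
                    close(fd);
                    return 0;
            }
            close(fd);
            if (!(entry & (1ULL << 63)))    /* page not present */
                    return 0;
            return (entry & ((1ULL << 55) - 1)) * page_size
                    + ((uintptr_t)virt % page_size);
    }

    int main(void)
    {
            static char buf[64] = "x";      /* touched, so the page is mapped */
            printf("virt %p -> phys 0x%llx\n", (void *)buf,
                   (unsigned long long)virt2phys(buf));
            return 0;
    }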