X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mempool%2Frte_mempool.c;h=f8d453d21f8fb6635c772db583d5dd1696dc6fa7;hb=f159c61c35ebc6ff18d2c6c2cb07c177935a96e3;hp=758c5410b4499fa06b0b1f34116ac14d2c06bc84;hpb=b291e69423d0cbfdc34411460b641215da2e3fdc;p=dpdk.git

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 758c5410b4..f8d453d21f 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -329,7 +329,7 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
 		off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
 	else
-		off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;
+		off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_MEMPOOL_ALIGN) - vaddr;
 
 	if (off > len) {
 		ret = -EINVAL;
@@ -356,6 +356,19 @@ fail:
 	return ret;
 }
 
+static rte_iova_t
+get_iova(void *addr)
+{
+	struct rte_memseg *ms;
+
+	/* try registered memory first */
+	ms = rte_mem_virt2memseg(addr, NULL);
+	if (ms == NULL || ms->iova == RTE_BAD_IOVA)
+		/* fall back to actual physical address */
+		return rte_mem_virt2iova(addr);
+	return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
+}
+
 /* Populate the mempool with a virtual area. Return the number of
  * objects added, or a negative value on error.
  */
@@ -375,12 +388,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 	for (off = 0; off < len &&
 		     mp->populated_size < mp->size; off += phys_len) {
 
-		iova = rte_mem_virt2iova(addr + off);
-
-		if (iova == RTE_BAD_IOVA && rte_eal_has_hugepages()) {
-			ret = -EINVAL;
-			goto fail;
-		}
+		iova = get_iova(addr + off);
 
 		/* populate with the largest group of contiguous pages */
 		for (phys_len = RTE_MIN(
@@ -391,7 +399,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 			phys_len = RTE_MIN(phys_len + pg_sz, len - off)) {
 			rte_iova_t iova_tmp;
 
-			iova_tmp = rte_mem_virt2iova(addr + off + phys_len);
+			iova_tmp = get_iova(addr + off + phys_len);
 
 			if (iova_tmp == RTE_BAD_IOVA ||
 					iova_tmp != iova + phys_len)
@@ -431,8 +439,6 @@ rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
 
 	if (!need_iova_contig_obj)
 		*pg_sz = 0;
-	else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA)
-		*pg_sz = 0;
 	else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
 		*pg_sz = get_min_page_size(mp->socket_id);
 	else
@@ -481,17 +487,15 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	 * then just set page shift and page size to 0, because the user has
 	 * indicated that there's no need to care about anything.
 	 *
-	 * if we do need contiguous objects, there is also an option to reserve
-	 * the entire mempool memory as one contiguous block of memory, in
-	 * which case the page shift and alignment wouldn't matter as well.
+	 * if we do need contiguous objects (if a mempool driver has its
+	 * own calc_size() method returning min_chunk_size = mem_size),
+	 * there is also an option to reserve the entire mempool memory
+	 * as one contiguous block of memory.
 	 *
 	 * if we require contiguous objects, but not necessarily the entire
-	 * mempool reserved space to be contiguous, then there are two options.
-	 *
-	 * if our IO addresses are virtual, not actual physical (IOVA as VA
-	 * case), then no page shift needed - our memory allocation will give us
-	 * contiguous IO memory as far as the hardware is concerned, so
-	 * act as if we're getting contiguous memory.
+	 * mempool reserved space to be contiguous, pg_sz will be != 0,
+	 * and the default ops->populate() will take care of not placing
+	 * objects across pages.
 	 *
 	 * if our IO addresses are physical, we may get memory from bigger
 	 * pages, or we might get memory from smaller pages, and how much of it
@@ -504,11 +508,6 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	 *
 	 * If we fail to get enough contiguous memory, then we'll go and
 	 * reserve space in smaller chunks.
-	 *
-	 * We also have to take into account the fact that memory that we're
-	 * going to allocate from can belong to an externally allocated memory
-	 * area, in which case the assumption of IOVA as VA mode being
-	 * synonymous with IOVA contiguousness will not hold.
 	 */
 
 	need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
@@ -646,8 +645,10 @@ rte_mempool_populate_anon(struct rte_mempool *mp)
 	}
 
 	ret = mempool_ops_alloc_once(mp);
-	if (ret != 0)
-		return ret;
+	if (ret < 0) {
+		rte_errno = -ret;
+		return 0;
+	}
 
 	size = get_anon_size(mp);
 	if (size < 0) {
@@ -671,8 +672,10 @@ rte_mempool_populate_anon(struct rte_mempool *mp)
 
 	ret = rte_mempool_populate_virt(mp, addr, size,
 		getpagesize(), rte_mempool_memchunk_anon_free, addr);
-	if (ret == 0)
+	if (ret < 0) {
+		rte_errno = -ret;
 		goto fail;
+	}
 
 	return mp->populated_size;
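
The populate path reworked above is easiest to see from the caller's side. The sketch below is not part of the patch: it maps an anonymous area and hands it to rte_mempool_populate_virt() with pg_sz = getpagesize(), roughly what rte_mempool_populate_anon() does internally (populate_anon additionally passes a free callback so the area is unmapped when the pool is destroyed). The function name sketch_populate_from_anon() and the ownership scheme are illustrative only; the mempool is assumed to have been created with rte_mempool_create_empty() and to have its ops set. With a non-zero pg_sz, the default populate op keeps objects within page boundaries, and the IOVA of each page is resolved by the new get_iova() helper (memseg lookup first, rte_mem_virt2iova() as fallback).

#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>

#include <rte_mempool.h>

/* Populate 'mp' from one anonymous mapping of 'len' bytes.
 * The mapping stays owned by the caller (free_cb is NULL), so it must be
 * munmap()'d only after the mempool itself has been freed.
 * Returns the number of objects added, or a negative errno value.
 */
static int
sketch_populate_from_anon(struct rte_mempool *mp, char **out_addr, size_t len)
{
	char *addr;
	int ret;

	addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return -errno;

	/* pg_sz = getpagesize(): objects are not placed across pages, and
	 * the IOVA of each page is taken from get_iova() above (memseg
	 * lookup first, physical address as fallback).
	 */
	ret = rte_mempool_populate_virt(mp, addr, len, getpagesize(),
					NULL, NULL);
	if (ret < 0) {
		munmap(addr, len);
		return ret;
	}

	*out_addr = addr;
	return ret;
}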
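
The last two hunks also make the error convention of rte_mempool_populate_anon() consistent: failure is now always reported by returning 0 with rte_errno set, while a positive return is the number of objects populated. A caller written against that convention could look like the following sketch (again not part of the patch; the pool name, object count, object size and cache size are arbitrary example values, and "ring_mp_mc" is just the classic ring-based ops name).

#include <stdio.h>

#include <rte_errno.h>
#include <rte_mempool.h>

/* Create an empty pool and fill it from anonymous memory. */
static struct rte_mempool *
sketch_anon_pool(void)
{
	struct rte_mempool *mp;

	mp = rte_mempool_create_empty("sketch_pool", 8192, 2048, 256, 0,
				      SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return NULL;

	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}

	/* With this patch, failure is reported as 0 with rte_errno set;
	 * a positive return is the number of objects populated.
	 */
	if (rte_mempool_populate_anon(mp) == 0) {
		printf("populate failed: %s\n", rte_strerror(rte_errno));
		rte_mempool_free(mp);
		return NULL;
	}

	return mp;
}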