X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mempool%2Frte_mempool.c;h=57a4f8122a2b606c58d3d0eb83f39f36ec82272b;hb=e735c8e20b9c727fd60f9ae1fa86114594c6ec76;hp=237665c65bc951a8ad94b3c65be777022ff79069;hpb=0cc0f8aaa35dc4b797d9226c51c1c5aba17a7b2d;p=dpdk.git

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 237665c65b..57a4f8122a 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -238,9 +238,16 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
  * Calculate maximum amount of memory required to store given number of objects.
  */
 size_t
-rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift)
+rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
+		      unsigned int flags)
 {
 	size_t obj_per_page, pg_num, pg_sz;
+	unsigned int mask;
+
+	mask = MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS | MEMPOOL_F_CAPA_PHYS_CONTIG;
+	if ((flags & mask) == mask)
+		/* alignment need one additional object */
+		elt_num += 1;
 
 	if (total_elt_sz == 0)
 		return 0;
@@ -264,12 +271,18 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift)
 ssize_t
 rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,
 	size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
-	uint32_t pg_shift)
+	uint32_t pg_shift, unsigned int flags)
 {
 	uint32_t elt_cnt = 0;
 	phys_addr_t start, end;
 	uint32_t paddr_idx;
 	size_t pg_sz = (size_t)1 << pg_shift;
+	unsigned int mask;
+
+	mask = MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS | MEMPOOL_F_CAPA_PHYS_CONTIG;
+	if ((flags & mask) == mask)
+		/* alignment need one additional object */
+		elt_num += 1;
 
 	/* if paddr is NULL, assume contiguous memory */
 	if (paddr == NULL) {
@@ -354,6 +367,11 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 	struct rte_mempool_memhdr *memhdr;
 	int ret;
 
+	/* Notify memory area to mempool */
+	ret = rte_mempool_ops_register_memory_area(mp, vaddr, paddr, len);
+	if (ret != -ENOTSUP && ret < 0)
+		return ret;
+
 	/* create the internal ring if not already done */
 	if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
 		ret = rte_mempool_ops_alloc(mp);
@@ -368,6 +386,16 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 
+	/* Detect pool area has sufficient space for elements */
+	if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
+		if (len < total_elt_sz * mp->size) {
+			RTE_LOG(ERR, MEMPOOL,
+				"pool area %" PRIx64 " not enough\n",
+				(uint64_t)len);
+			return -ENOSPC;
+		}
+	}
+
 	memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
 	if (memhdr == NULL)
 		return -ENOMEM;
@@ -379,7 +407,10 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 	memhdr->free_cb = free_cb;
 	memhdr->opaque = opaque;
 
-	if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+	if (mp->flags & MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS)
+		/* align object start address to a multiple of total_elt_sz */
+		off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
+	else if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
 		off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
 	else
 		off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;
@@ -473,8 +504,6 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 		mp->populated_size < mp->size; off += phys_len) {
 
 		paddr = rte_mem_virt2phy(addr + off);
-		/* required for xen_dom0 to get the machine address */
-		paddr = rte_mem_phy2mch(-1, paddr);
 
 		if (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) {
 			ret = -EINVAL;
@@ -486,7 +515,6 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 			phys_addr_t paddr_tmp;
 
 			paddr_tmp = rte_mem_virt2phy(addr + off + phys_len);
-			paddr_tmp = rte_mem_phy2mch(-1, paddr_tmp);
 
 			if (paddr_tmp != paddr + phys_len)
 				break;
@@ -521,17 +549,26 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	size_t size, total_elt_sz, align, pg_sz, pg_shift;
 	phys_addr_t paddr;
 	unsigned mz_id, n;
+	unsigned int mp_flags;
 	int ret;
 
 	/* mempool must not be populated */
 	if (mp->nb_mem_chunks != 0)
 		return -EEXIST;
 
-	if (rte_xen_dom0_supported()) {
-		pg_sz = RTE_PGSIZE_2M;
-		pg_shift = rte_bsf32(pg_sz);
-		align = pg_sz;
-	} else if (rte_eal_has_hugepages()) {
+	/* Get mempool capabilities */
+	mp_flags = 0;
+	ret = rte_mempool_ops_get_capabilities(mp, &mp_flags);
+	if (ret == -ENOTSUP)
+		RTE_LOG(DEBUG, MEMPOOL, "get_capability not supported for %s\n",
+					mp->name);
+	else if (ret < 0)
+		return ret;
+
+	/* update mempool capabilities */
+	mp->flags |= mp_flags;
+
+	if (rte_eal_has_hugepages()) {
 		pg_shift = 0; /* not needed, zone is physically contiguous */
 		pg_sz = 0;
 		align = RTE_CACHE_LINE_SIZE;
@@ -543,7 +580,8 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 
 	for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
-		size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift);
+		size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift,
+						mp->flags);
 
 		ret = snprintf(mz_name, sizeof(mz_name),
 			RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
@@ -568,7 +606,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 		else
 			paddr = mz->phys_addr;
 
-		if (rte_eal_has_hugepages() && !rte_xen_dom0_supported())
+		if (rte_eal_has_hugepages())
 			ret = rte_mempool_populate_phys(mp, mz->addr,
 				paddr, mz->len,
 				rte_mempool_memchunk_mz_free,
@@ -600,7 +638,8 @@ get_anon_size(const struct rte_mempool *mp)
 	pg_sz = getpagesize();
 	pg_shift = rte_bsf32(pg_sz);
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
-	size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift);
+	size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift,
+					mp->flags);
 
 	return size;
 }
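
Illustration (not part of the patch): a minimal sketch of how the two new capability flags are meant to be used. The flags and the four-argument rte_mempool_xmem_size() come from the diff above; the callback shape is an assumption based on the (mp, &mp_flags) arguments that rte_mempool_ops_get_capabilities() is called with, and the function names here are hypothetical.

#include <rte_mempool.h>

/* Driver-side sketch (hypothetical callback): report that objects must
 * start on a total_elt_sz boundary and that the whole pool area must be
 * physically contiguous. rte_mempool_populate_default() ORs these flags
 * into mp->flags before sizing and populating the pool. */
static int
example_get_capabilities(const struct rte_mempool *mp __rte_unused,
			 unsigned int *flags)
{
	*flags |= MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS |
		  MEMPOOL_F_CAPA_PHYS_CONTIG;
	return 0;
}

/* Caller-side sketch: size the memory area for n objects of such a pool.
 * With both flags set, rte_mempool_xmem_size() accounts for one extra
 * object so the first object can be aligned to a total_elt_sz boundary. */
static size_t
example_area_size(uint32_t n, size_t total_elt_sz, uint32_t pg_shift)
{
	return rte_mempool_xmem_size(n, total_elt_sz, pg_shift,
			MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS |
			MEMPOOL_F_CAPA_PHYS_CONTIG);
}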