X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=lib%2Flibrte_mempool%2Frte_mempool.c;h=412bda16e1cbd17a64ebd632f230b60b1690aaf3;hb=1950bd76946e940d3e6e89df24e5ac4d000e747c;hp=1f1731630856996738e94ca599fe74f9439d3cc2;hpb=f03723017a2a5ea421df821eb0ff9a0bfcacff4f;p=dpdk.git

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 1f17316308..412bda16e1 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -238,9 +238,16 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
  * Calculate maximum amount of memory required to store given number of objects.
  */
 size_t
-rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift)
+rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
+	unsigned int flags)
 {
 	size_t obj_per_page, pg_num, pg_sz;
+	unsigned int mask;
+
+	mask = MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS | MEMPOOL_F_CAPA_PHYS_CONTIG;
+	if ((flags & mask) == mask)
+		/* alignment needs one additional object */
+		elt_num += 1;
 
 	if (total_elt_sz == 0)
 		return 0;
@@ -264,12 +271,18 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift)
 ssize_t
 rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,
 	size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
-	uint32_t pg_shift)
+	uint32_t pg_shift, unsigned int flags)
 {
 	uint32_t elt_cnt = 0;
 	phys_addr_t start, end;
 	uint32_t paddr_idx;
 	size_t pg_sz = (size_t)1 << pg_shift;
+	unsigned int mask;
+
+	mask = MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS | MEMPOOL_F_CAPA_PHYS_CONTIG;
+	if ((flags & mask) == mask)
+		/* alignment needs one additional object */
+		elt_num += 1;
 
 	/* if paddr is NULL, assume contiguous memory */
 	if (paddr == NULL) {
@@ -354,6 +367,11 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 	struct rte_mempool_memhdr *memhdr;
 	int ret;
 
+	/* Notify the mempool ops of the new memory area */
+	ret = rte_mempool_ops_register_memory_area(mp, vaddr, paddr, len);
+	if (ret != -ENOTSUP && ret < 0)
+		return ret;
+
 	/* create the internal ring if not already done */
 	if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
 		ret = rte_mempool_ops_alloc(mp);
@@ -368,6 +386,16 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 
+	/* Check that the pool area has sufficient space for the elements */
+	if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
+		if (len < total_elt_sz * mp->size) {
+			RTE_LOG(ERR, MEMPOOL,
+				"pool area %" PRIx64 " not enough\n",
+				(uint64_t)len);
+			return -ENOSPC;
+		}
+	}
+
 	memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
 	if (memhdr == NULL)
 		return -ENOMEM;
@@ -379,7 +407,10 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 	memhdr->free_cb = free_cb;
 	memhdr->opaque = opaque;
 
-	if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+	if (mp->flags & MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS)
+		/* align object start address to a multiple of total_elt_sz */
+		off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
+	else if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
 		off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
 	else
 		off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;
@@ -428,7 +459,7 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
 
 		/* populate with the largest group of contiguous pages */
 		for (n = 1; (i + n) < pg_num &&
-			paddr[i] + pg_sz == paddr[i+n]; n++)
+			paddr[i + n - 1] + pg_sz == paddr[i + n]; n++)
 			;
 
 		ret = rte_mempool_populate_phys(mp, vaddr + i * pg_sz,
@@ -476,7 +507,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 		/* required for xen_dom0 to get the machine address */
 		paddr = rte_mem_phy2mch(-1, paddr);
 
-		if (paddr == RTE_BAD_PHYS_ADDR) {
+		if (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) {
 			ret = -EINVAL;
 			goto fail;
 		}
@@ -515,23 +546,32 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 int
 rte_mempool_populate_default(struct rte_mempool *mp)
 {
-	int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
+	unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	const struct rte_memzone *mz;
 	size_t size, total_elt_sz, align, pg_sz, pg_shift;
 	phys_addr_t paddr;
 	unsigned mz_id, n;
+	unsigned int mp_flags;
 	int ret;
 
 	/* mempool must not be populated */
 	if (mp->nb_mem_chunks != 0)
 		return -EEXIST;
 
-	if (rte_xen_dom0_supported()) {
-		pg_sz = RTE_PGSIZE_2M;
-		pg_shift = rte_bsf32(pg_sz);
-		align = pg_sz;
-	} else if (rte_eal_has_hugepages()) {
+	/* Get mempool capabilities */
+	mp_flags = 0;
+	ret = rte_mempool_ops_get_capabilities(mp, &mp_flags);
+	if (ret == -ENOTSUP)
+		RTE_LOG(DEBUG, MEMPOOL, "get_capability not supported for %s\n",
+			mp->name);
+	else if (ret < 0)
+		return ret;
+
+	/* update mempool capabilities */
+	mp->flags |= mp_flags;
+
+	if (rte_eal_has_hugepages()) {
 		pg_shift = 0; /* not needed, zone is physically contiguous */
 		pg_sz = 0;
 		align = RTE_CACHE_LINE_SIZE;
@@ -543,7 +583,8 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 
 	for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
-		size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift);
+		size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift,
+						mp->flags);
 
 		ret = snprintf(mz_name, sizeof(mz_name),
 			RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
@@ -568,7 +609,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 		else
 			paddr = mz->phys_addr;
 
-		if (rte_eal_has_hugepages() && !rte_xen_dom0_supported())
+		if (rte_eal_has_hugepages())
 			ret = rte_mempool_populate_phys(mp, mz->addr,
 				paddr, mz->len,
 				rte_mempool_memchunk_mz_free,
@@ -578,8 +619,10 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 				mz->len, pg_sz,
 				rte_mempool_memchunk_mz_free,
 				(void *)(uintptr_t)mz);
-		if (ret < 0)
+		if (ret < 0) {
+			rte_memzone_free(mz);
 			goto fail;
+		}
 	}
 
 	return mp->size;
@@ -598,7 +641,8 @@ get_anon_size(const struct rte_mempool *mp)
 	pg_sz = getpagesize();
 	pg_shift = rte_bsf32(pg_sz);
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
-	size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift);
+	size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift,
+					mp->flags);
 
 	return size;
 }
@@ -740,7 +784,7 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 	struct rte_tailq_entry *te = NULL;
 	const struct rte_memzone *mz = NULL;
 	size_t mempool_size;
-	int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
+	unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
 	struct rte_mempool_objsz objsz;
 	unsigned lcore_id;
 	int ret;
@@ -816,7 +860,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 		goto exit_unlock;
 	}
 	mp->mz = mz;
-	mp->socket_id = socket_id;
 	mp->size = n;
 	mp->flags = flags;
 	mp->socket_id = socket_id;
@@ -867,6 +910,7 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 	rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
 	int socket_id, unsigned flags)
 {
+	int ret;
 	struct rte_mempool *mp;
 
 	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
@@ -878,14 +922,17 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 	 * Since we have 4 combinations of the SP/SC/MP/MC examine the flags to
 	 * set the correct index into the table of ops structs.
 	 */
-	if (flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET))
-		rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
+	if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
+		ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
 	else if (flags & MEMPOOL_F_SP_PUT)
-		rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
+		ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
 	else if (flags & MEMPOOL_F_SC_GET)
-		rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
+		ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
 	else
-		rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
+		ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
+
+	if (ret)
+		goto fail;
 
 	/* call the mempool priv initializer */
 	if (mp_init)
@@ -908,9 +955,8 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 /*
  * Create the mempool over already allocated chunk of memory.
  * That external memory buffer can consists of physically disjoint pages.
- * Setting vaddr to NULL, makes mempool to fallback to original behaviour
- * and allocate space for mempool and it's elements as one big chunk of
- * physically continuos memory.
+ * Setting vaddr to NULL makes the mempool fall back to rte_mempool_create()
+ * behavior.
  */
 struct rte_mempool *
 rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
@@ -997,12 +1043,6 @@ rte_mempool_in_use_count(const struct rte_mempool *mp)
 	return mp->size - rte_mempool_avail_count(mp);
 }
 
-unsigned int
-rte_mempool_count(const struct rte_mempool *mp)
-{
-	return rte_mempool_avail_count(mp);
-}
-
 /* dump the cache status */
 static unsigned
 rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
@@ -1046,7 +1086,7 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,
 	/* Force to drop the "const" attribute. This is done only when
 	 * DEBUG is enabled */
 	tmp = (void *) obj_table_const;
-	obj_table = (void **) tmp;
+	obj_table = tmp;
 
 	while (n--) {
 		obj = obj_table[n];
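
For context, the sketch below shows how a caller might combine the driver-reported
capability flags with the new `flags` argument of rte_mempool_xmem_size() when
sizing memory for a pool, mirroring what rte_mempool_populate_default() does in
the hunks above. It is a minimal illustration, not part of the patch:
example_pool_mem_size() is a hypothetical helper, and it assumes the 17.11-era
API introduced by this diff (rte_mempool_ops_get_capabilities() plus the extra
`flags` parameter).

#include <rte_mempool.h>

/* Hypothetical helper: memory needed to hold all objects of an
 * (unpopulated) mempool, honoring driver-reported capabilities. */
static ssize_t
example_pool_mem_size(struct rte_mempool *mp, uint32_t pg_shift)
{
	size_t total_elt_sz;
	unsigned int mp_flags = 0;
	int ret;

	/* Query the ops capabilities, as rte_mempool_populate_default()
	 * now does; -ENOTSUP simply means "no special capabilities". */
	ret = rte_mempool_ops_get_capabilities(mp, &mp_flags);
	if (ret < 0 && ret != -ENOTSUP)
		return ret;

	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

	/* With both CAPA flags set, this reserves room for one extra
	 * object so the first object can start on a total_elt_sz
	 * boundary, matching the "+1 element" logic in the patch. */
	return rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift,
				     mp->flags | mp_flags);
}

The extra element exists because, when objects must be block-aligned inside a
physically contiguous area, up to total_elt_sz bytes at the start of the area
can be consumed by the alignment offset computed in rte_mempool_populate_phys().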