X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mempool%2Frte_mempool.c;h=8c8b9f8097db1b18e09c61f254e9602f35ac65c6;hb=006bb2cc209f6ba11230743c0402e6c29a29efc8;hp=5c75c16948bbabc4e362055f21911168e6906cea;hpb=ce1f2c61ed135e4133d0429e86e554bfd4d58cb0;p=dpdk.git

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 5c75c16948..8c8b9f8097 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -227,11 +227,13 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 
 /*
- * Calculate maximum amount of memory required to store given number of objects.
+ * Internal function to calculate required memory chunk size shared
+ * by default implementation of the corresponding callback and
+ * deprecated external function.
  */
 size_t
-rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
-		      __rte_unused unsigned int flags)
+rte_mempool_calc_mem_size_helper(uint32_t elt_num, size_t total_elt_sz,
+				 uint32_t pg_shift)
 {
 	size_t obj_per_page, pg_num, pg_sz;
 
@@ -250,6 +252,17 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
 	return pg_num << pg_shift;
 }
 
+/*
+ * Calculate maximum amount of memory required to store given number of objects.
+ */
+size_t
+rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
+		      __rte_unused unsigned int flags)
+{
+	return rte_mempool_calc_mem_size_helper(elt_num, total_elt_sz,
+						pg_shift);
+}
+
 /*
  * Calculate how much memory would be actually required with the
  * given memory footprint to store required number of elements.
@@ -365,11 +378,6 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	if (ret != 0)
 		return ret;
 
-	/* Notify memory area to mempool */
-	ret = rte_mempool_ops_register_memory_area(mp, vaddr, iova, len);
-	if (ret != -ENOTSUP && ret < 0)
-		return ret;
-
 	/* mempool is already populated */
 	if (mp->populated_size >= mp->size)
 		return -ENOSPC;
@@ -484,9 +492,6 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 	size_t off, phys_len;
 	int ret, cnt = 0;
 
-	/* mempool must not be populated */
-	if (mp->nb_mem_chunks != 0)
-		return -EEXIST;
 	/* address and len must be page-aligned */
 	if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr)
 		return -EINVAL;
@@ -676,7 +681,8 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 			 * have
 			 */
 			mz = rte_memzone_reserve_aligned(mz_name, 0,
-					mp->socket_id, flags, align);
+					mp->socket_id, flags,
+					RTE_MAX(pg_sz, align));
 		}
 		if (mz == NULL) {
 			ret = -rte_errno;
@@ -701,7 +707,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 				(void *)(uintptr_t)mz);
 		else
 			ret = rte_mempool_populate_virt(mp, mz->addr,
-				mz->len, pg_sz,
+				RTE_ALIGN_FLOOR(mz->len, pg_sz), pg_sz,
 				rte_mempool_memchunk_mz_free,
 				(void *)(uintptr_t)mz);
 		if (ret < 0) {
@@ -762,7 +768,7 @@ rte_mempool_populate_anon(struct rte_mempool *mp)
 	char *addr;
 
 	/* mempool is already populated, error */
-	if (!STAILQ_EMPTY(&mp->mem_list)) {
+	if ((!STAILQ_EMPTY(&mp->mem_list)) || mp->nb_mem_chunks != 0) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -1247,6 +1253,36 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,
 #endif
 }
 
+void
+rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
+	void * const *first_obj_table_const, unsigned int n, int free)
+{
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+	struct rte_mempool_info info;
+	const size_t total_elt_sz =
+		mp->header_size + mp->elt_size + mp->trailer_size;
+	unsigned int i, j;
+
+	rte_mempool_ops_get_info(mp, &info);
+
+	for (i = 0; i < n; ++i) {
+		void *first_obj = first_obj_table_const[i];
+
+		for (j = 0; j < info.contig_block_size; ++j) {
+			void *obj;
+
+			obj = (void *)((uintptr_t)first_obj + j * total_elt_sz);
+			rte_mempool_check_cookies(mp, &obj, 1, free);
+		}
+	}
+#else
+	RTE_SET_USED(mp);
+	RTE_SET_USED(first_obj_table_const);
+	RTE_SET_USED(n);
+	RTE_SET_USED(free);
+#endif
+}
+
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 static void
 mempool_obj_audit(struct rte_mempool *mp, __rte_unused void *opaque,
@@ -1312,6 +1348,7 @@ void
 rte_mempool_dump(FILE *f, struct rte_mempool *mp)
 {
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+	struct rte_mempool_info info;
 	struct rte_mempool_debug_stats sum;
 	unsigned lcore_id;
 #endif
@@ -1353,6 +1390,7 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
 
 	/* sum and dump statistics */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+	rte_mempool_ops_get_info(mp, &info);
 	memset(&sum, 0, sizeof(sum));
 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
 		sum.put_bulk += mp->stats[lcore_id].put_bulk;
@@ -1361,6 +1399,8 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
 		sum.get_success_objs += mp->stats[lcore_id].get_success_objs;
 		sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk;
 		sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs;
+		sum.get_success_blks += mp->stats[lcore_id].get_success_blks;
+		sum.get_fail_blks += mp->stats[lcore_id].get_fail_blks;
 	}
 	fprintf(f, "  stats:\n");
 	fprintf(f, "    put_bulk=%"PRIu64"\n", sum.put_bulk);
@@ -1369,6 +1409,11 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
 	fprintf(f, "    get_success_objs=%"PRIu64"\n", sum.get_success_objs);
 	fprintf(f, "    get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
 	fprintf(f, "    get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
+	if (info.contig_block_size > 0) {
+		fprintf(f, "    get_success_blks=%"PRIu64"\n",
+			sum.get_success_blks);
+		fprintf(f, "    get_fail_blks=%"PRIu64"\n", sum.get_fail_blks);
+	}
 #else
 	fprintf(f, "  no statistics available\n");
 #endif
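
Note (not part of the diff): the standalone sketch below illustrates the arithmetic the diff touches, without requiring DPDK. calc_mem_size_sketch() is a hypothetical stand-in for what rte_mempool_calc_mem_size_helper() returns; most of that helper's body falls outside the visible hunk, so this version assumes the conventional layout in which an object never crosses a page boundary, consistent with the visible "return pg_num << pg_shift;". walk_contig_block() mirrors the per-object address stride that the new rte_mempool_contig_blocks_check_cookies() applies within each contiguous block. The function names and example numbers are invented for illustration only.

/*
 * Standalone sketch, not DPDK code.  Build with: cc -Wall sketch.c
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for rte_mempool_calc_mem_size_helper(): assumes
 * objects never straddle a page boundary when pg_shift != 0. */
static size_t
calc_mem_size_sketch(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift)
{
	size_t pg_sz, obj_per_page, pg_num;

	if (total_elt_sz == 0)
		return 0;

	if (pg_shift == 0)	/* no paging constraint: plain product */
		return total_elt_sz * elt_num;

	pg_sz = (size_t)1 << pg_shift;
	obj_per_page = pg_sz / total_elt_sz;
	if (obj_per_page == 0)	/* object larger than a page */
		return ((total_elt_sz + pg_sz - 1) / pg_sz) * pg_sz * elt_num;

	/* round the object count up to whole pages */
	pg_num = (elt_num + obj_per_page - 1) / obj_per_page;
	return pg_num << pg_shift;	/* same expression as in the hunk */
}

/* Mirrors the stride used by rte_mempool_contig_blocks_check_cookies():
 * objects of one block sit back to back, total_elt_sz bytes apart. */
static void
walk_contig_block(void *first_obj, size_t total_elt_sz,
		  unsigned int contig_block_size)
{
	unsigned int j;

	for (j = 0; j < contig_block_size; ++j) {
		void *obj = (void *)((uintptr_t)first_obj + j * total_elt_sz);

		printf("  obj[%u] at %p\n", j, obj);
	}
}

int
main(void)
{
	/* Example numbers only: 2176-byte objects (header + element +
	 * trailer), 4 KiB pages. */
	uint32_t elt_num = 1000;
	size_t total_elt_sz = 2176;
	uint32_t pg_shift = 12;
	char block[4 * 2176];

	printf("mem size: %zu bytes\n",
	       calc_mem_size_sketch(elt_num, total_elt_sz, pg_shift));

	printf("objects of a 4-element contiguous block:\n");
	walk_contig_block(block, total_elt_sz, 4);

	return 0;
}

With these example numbers only one 2176-byte object fits in a 4 KiB page, so 1000 objects round up to 1000 pages (4 096 000 bytes); with pg_shift = 0 the sketch would instead report 2 176 000 bytes of fully contiguous memory.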