X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Fmempool%2Frte_mempool.c;h=b75d26c82a00da72b6451352e4f06cf34e69c4b0;hb=28dde5da503ed09f10cdfb295e390b114df7330a;hp=59a588425bd6dddd02117bcba0420ff9a5d60cb5;hpb=cee151b41b74873bb4bbf6a86a96b37f829fc237;p=dpdk.git diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c index 59a588425b..b75d26c82a 100644 --- a/lib/mempool/rte_mempool.c +++ b/lib/mempool/rte_mempool.c @@ -42,6 +42,18 @@ static struct rte_tailq_elem rte_mempool_tailq = { }; EAL_REGISTER_TAILQ(rte_mempool_tailq) +TAILQ_HEAD(mempool_callback_list, rte_tailq_entry); + +static struct rte_tailq_elem callback_tailq = { + .name = "RTE_MEMPOOL_CALLBACK", +}; +EAL_REGISTER_TAILQ(callback_tailq) + +/* Invoke all registered mempool event callbacks. */ +static void +mempool_event_callback_invoke(enum rte_mempool_event event, + struct rte_mempool *mp); + #define CACHE_FLUSHTHRESH_MULTIPLIER 1.5 #define CALC_CACHE_FLUSHTHRESH(c) \ ((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER)) @@ -167,7 +179,7 @@ mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque, #ifdef RTE_LIBRTE_MEMPOOL_DEBUG hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2; - tlr = __mempool_get_trailer(obj); + tlr = rte_mempool_get_trailer(obj); tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE; #endif } @@ -216,7 +228,7 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, sz = (sz != NULL) ? sz : &lsz; sz->header_size = sizeof(struct rte_mempool_objhdr); - if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) + if ((flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) == 0) sz->header_size = RTE_ALIGN_CEIL(sz->header_size, RTE_MEMPOOL_ALIGN); @@ -230,7 +242,7 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t)); /* expand trailer to next cache line */ - if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) { + if ((flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) == 0) { sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size; sz->trailer_size += ((RTE_MEMPOOL_ALIGN - @@ -242,7 +254,7 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, * increase trailer to add padding between objects in order to * spread them across memory channels/ranks */ - if ((flags & MEMPOOL_F_NO_SPREAD) == 0) { + if ((flags & RTE_MEMPOOL_F_NO_SPREAD) == 0) { unsigned new_size; new_size = arch_mem_object_align (sz->header_size + sz->elt_size + sz->trailer_size); @@ -294,11 +306,11 @@ mempool_ops_alloc_once(struct rte_mempool *mp) int ret; /* create the internal ring if not already done */ - if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) { + if ((mp->flags & RTE_MEMPOOL_F_POOL_CREATED) == 0) { ret = rte_mempool_ops_alloc(mp); if (ret != 0) return ret; - mp->flags |= MEMPOOL_F_POOL_CREATED; + mp->flags |= RTE_MEMPOOL_F_POOL_CREATED; } return 0; } @@ -336,7 +348,7 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, memhdr->free_cb = free_cb; memhdr->opaque = opaque; - if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN) + if (mp->flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr; else off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_MEMPOOL_ALIGN) - vaddr; @@ -360,6 +372,14 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next); mp->nb_mem_chunks++; + /* At least some objects in the pool can now be used for IO. */ + if (iova != RTE_BAD_IOVA) + mp->flags &= ~RTE_MEMPOOL_F_NON_IO; + + /* Report the mempool as ready only when fully populated. 
*/ + if (mp->populated_size >= mp->size) + mempool_event_callback_invoke(RTE_MEMPOOL_EVENT_READY, mp); + rte_mempool_trace_populate_iova(mp, vaddr, iova, len, free_cb, opaque); return i; @@ -393,7 +413,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, size_t off, phys_len; int ret, cnt = 0; - if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) + if (mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG) return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA, len, free_cb, opaque); @@ -450,7 +470,7 @@ rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz) if (ret < 0) return -EINVAL; alloc_in_ext_mem = (ret == 1); - need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG); + need_iova_contig_obj = !(mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG); if (!need_iova_contig_obj) *pg_sz = 0; @@ -527,7 +547,7 @@ rte_mempool_populate_default(struct rte_mempool *mp) * reserve space in smaller chunks. */ - need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG); + need_iova_contig_obj = !(mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG); ret = rte_mempool_get_page_size(mp, &pg_sz); if (ret < 0) return ret; @@ -722,6 +742,7 @@ rte_mempool_free(struct rte_mempool *mp) } rte_mcfg_tailq_write_unlock(); + mempool_event_callback_invoke(RTE_MEMPOOL_EVENT_DESTROY, mp); rte_mempool_trace_free(mp); rte_mempool_free_memchunks(mp); rte_mempool_ops_free(mp); @@ -821,9 +842,21 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, return NULL; } + /* enforce only user flags are passed by the application */ + if ((flags & ~RTE_MEMPOOL_VALID_USER_FLAGS) != 0) { + rte_errno = EINVAL; + return NULL; + } + + /* + * No objects in the pool can be used for IO until it's populated + * with at least some objects with valid IOVA. + */ + flags |= RTE_MEMPOOL_F_NON_IO; + /* "no cache align" imply "no spread" */ - if (flags & MEMPOOL_F_NO_CACHE_ALIGN) - flags |= MEMPOOL_F_NO_SPREAD; + if (flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) + flags |= RTE_MEMPOOL_F_NO_SPREAD; /* calculate mempool object sizes. */ if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) { @@ -848,7 +881,7 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, goto exit_unlock; } - mempool_size = MEMPOOL_HEADER_SIZE(mp, cache_size); + mempool_size = RTE_MEMPOOL_HEADER_SIZE(mp, cache_size); mempool_size += private_data_size; mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN); @@ -864,7 +897,7 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, /* init the mempool structure */ mp = mz->addr; - memset(mp, 0, MEMPOOL_HEADER_SIZE(mp, cache_size)); + memset(mp, 0, RTE_MEMPOOL_HEADER_SIZE(mp, cache_size)); ret = strlcpy(mp->name, name, sizeof(mp->name)); if (ret < 0 || ret >= (int)sizeof(mp->name)) { rte_errno = ENAMETOOLONG; @@ -888,7 +921,7 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, * The local_cache points to just past the elt_pa[] array. */ mp->local_cache = (struct rte_mempool_cache *) - RTE_PTR_ADD(mp, MEMPOOL_HEADER_SIZE(mp, 0)); + RTE_PTR_ADD(mp, RTE_MEMPOOL_HEADER_SIZE(mp, 0)); /* Init all default caches. */ if (cache_size != 0) { @@ -935,11 +968,11 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size, * Since we have 4 combinations of the SP/SC/MP/MC examine the flags to * set the correct index into the table of ops structs. 
*/ - if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET)) + if ((flags & RTE_MEMPOOL_F_SP_PUT) && (flags & RTE_MEMPOOL_F_SC_GET)) ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL); - else if (flags & MEMPOOL_F_SP_PUT) + else if (flags & RTE_MEMPOOL_F_SP_PUT) ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL); - else if (flags & MEMPOOL_F_SC_GET) + else if (flags & RTE_MEMPOOL_F_SC_GET) ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL); else ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL); @@ -1051,7 +1084,7 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp, rte_panic("MEMPOOL: object is owned by another " "mempool\n"); - hdr = __mempool_get_header(obj); + hdr = rte_mempool_get_header(obj); cookie = hdr->cookie; if (free == 0) { @@ -1079,7 +1112,7 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp, rte_panic("MEMPOOL: bad header cookie (audit)\n"); } } - tlr = __mempool_get_trailer(obj); + tlr = rte_mempool_get_trailer(obj); cookie = tlr->cookie; if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) { RTE_LOG(CRIT, MEMPOOL, @@ -1131,7 +1164,7 @@ static void mempool_obj_audit(struct rte_mempool *mp, __rte_unused void *opaque, void *obj, __rte_unused unsigned idx) { - __mempool_check_cookies(mp, &obj, 1, 2); + RTE_MEMPOOL_CHECK_COOKIES(mp, &obj, 1, 2); } static void @@ -1337,9 +1370,116 @@ void rte_mempool_walk(void (*func)(struct rte_mempool *, void *), rte_mcfg_mempool_read_lock(); - TAILQ_FOREACH_SAFE(te, mempool_list, next, tmp_te) { + RTE_TAILQ_FOREACH_SAFE(te, mempool_list, next, tmp_te) { (*func)((struct rte_mempool *) te->data, arg); } rte_mcfg_mempool_read_unlock(); } + +struct mempool_callback_data { + rte_mempool_event_callback *func; + void *user_data; +}; + +static void +mempool_event_callback_invoke(enum rte_mempool_event event, + struct rte_mempool *mp) +{ + struct mempool_callback_list *list; + struct rte_tailq_entry *te; + void *tmp_te; + + rte_mcfg_tailq_read_lock(); + list = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list); + RTE_TAILQ_FOREACH_SAFE(te, list, next, tmp_te) { + struct mempool_callback_data *cb = te->data; + rte_mcfg_tailq_read_unlock(); + cb->func(event, mp, cb->user_data); + rte_mcfg_tailq_read_lock(); + } + rte_mcfg_tailq_read_unlock(); +} + +int +rte_mempool_event_callback_register(rte_mempool_event_callback *func, + void *user_data) +{ + struct mempool_callback_list *list; + struct rte_tailq_entry *te = NULL; + struct mempool_callback_data *cb; + void *tmp_te; + int ret; + + if (func == NULL) { + rte_errno = EINVAL; + return -rte_errno; + } + + rte_mcfg_tailq_write_lock(); + list = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list); + RTE_TAILQ_FOREACH_SAFE(te, list, next, tmp_te) { + cb = te->data; + if (cb->func == func && cb->user_data == user_data) { + ret = -EEXIST; + goto exit; + } + } + + te = rte_zmalloc("mempool_cb_tail_entry", sizeof(*te), 0); + if (te == NULL) { + RTE_LOG(ERR, MEMPOOL, + "Cannot allocate event callback tailq entry!\n"); + ret = -ENOMEM; + goto exit; + } + + cb = rte_malloc("mempool_cb_data", sizeof(*cb), 0); + if (cb == NULL) { + RTE_LOG(ERR, MEMPOOL, + "Cannot allocate event callback!\n"); + rte_free(te); + ret = -ENOMEM; + goto exit; + } + + cb->func = func; + cb->user_data = user_data; + te->data = cb; + TAILQ_INSERT_TAIL(list, te, next); + ret = 0; + +exit: + rte_mcfg_tailq_write_unlock(); + rte_errno = -ret; + return ret; +} + +int +rte_mempool_event_callback_unregister(rte_mempool_event_callback *func, + void *user_data) +{ + struct mempool_callback_list 
+		*list;
+	struct rte_tailq_entry *te = NULL;
+	struct mempool_callback_data *cb;
+	int ret = -ENOENT;
+
+	rte_mcfg_tailq_write_lock();
+	list = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list);
+	TAILQ_FOREACH(te, list, next) {
+		cb = te->data;
+		if (cb->func == func && cb->user_data == user_data) {
+			TAILQ_REMOVE(list, te, next);
+			ret = 0;
+			break;
+		}
+	}
+	rte_mcfg_tailq_write_unlock();
+
+	if (ret == 0) {
+		rte_free(te);
+		rte_free(cb);
+	}
+	rte_errno = -ret;
+	return ret;
+}
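
Usage note (not part of the change above): a minimal sketch of how a driver could consume the event callback API that this patch introduces. The callback type, the RTE_MEMPOOL_EVENT_READY/RTE_MEMPOOL_EVENT_DESTROY events and the register/unregister functions are the ones added here; the my_driver_* helpers are hypothetical placeholders for driver-specific work such as DMA mapping, and the API is meant for internal/driver use rather than for applications.

#include <rte_common.h>
#include <rte_mempool.h>

/* Placeholders for driver-specific per-mempool setup/teardown. */
static void
my_driver_map_pool(struct rte_mempool *mp, void *priv)
{
	RTE_SET_USED(mp);
	RTE_SET_USED(priv);
}

static void
my_driver_unmap_pool(struct rte_mempool *mp, void *priv)
{
	RTE_SET_USED(mp);
	RTE_SET_USED(priv);
}

/* Matches the rte_mempool_event_callback signature used by the new API. */
static void
my_mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,
		    void *user_data)
{
	/*
	 * RTE_MEMPOOL_F_NON_IO stays set until the pool holds at least one
	 * object with a valid IOVA, so pools that can never be used for IO
	 * can be skipped here.
	 */
	if (mp->flags & RTE_MEMPOOL_F_NON_IO)
		return;

	switch (event) {
	case RTE_MEMPOOL_EVENT_READY:
		/* Invoked from rte_mempool_populate_iova() once the pool is
		 * fully populated (populated_size >= size).
		 */
		my_driver_map_pool(mp, user_data);
		break;
	case RTE_MEMPOOL_EVENT_DESTROY:
		/* Invoked from rte_mempool_free() before the pool goes away. */
		my_driver_unmap_pool(mp, user_data);
		break;
	}
}

static int
my_driver_init(void *priv)
{
	/* Returns -EEXIST (and sets rte_errno) if this (func, user_data)
	 * pair is already registered.
	 */
	return rte_mempool_event_callback_register(my_mempool_event_cb, priv);
}

static void
my_driver_close(void *priv)
{
	/* Returns -ENOENT if the pair was never registered. */
	rte_mempool_event_callback_unregister(my_mempool_event_cb, priv);
}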
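
A second sketch, also not part of the change: the RTE_MEMPOOL_F_NON_IO life cycle implemented above. rte_mempool_create_empty() now forces the flag on, and rte_mempool_populate_iova() clears it once a memory chunk with a valid IOVA is added, so the flag can be read after population to tell whether the pool is usable for IO. The pool name, sizes and the "ring_mp_mc" ops choice below are arbitrary, and the final check assumes the EAL supplied memory with a valid IOVA (a pool populated only over RTE_BAD_IOVA keeps the flag set).

#include <stdio.h>

#include <rte_memory.h>   /* SOCKET_ID_ANY */
#include <rte_mempool.h>

static int
non_io_flag_demo(void)
{
	struct rte_mempool *mp;

	/* Arbitrary sizes; no special flags requested by the caller. */
	mp = rte_mempool_create_empty("non_io_demo", 64, 128, 0, 0,
				      SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return -1;

	/* Not populated yet: the patch forces RTE_MEMPOOL_F_NON_IO on. */
	if (!(mp->flags & RTE_MEMPOOL_F_NON_IO))
		printf("unexpected: NON_IO clear before population\n");

	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) < 0 ||
	    rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return -1;
	}

	/* Populated with memory that has a valid IOVA: the flag is cleared
	 * by rte_mempool_populate_iova(), so the pool may be used for IO.
	 */
	if (mp->flags & RTE_MEMPOOL_F_NON_IO)
		printf("pool has no objects usable for IO\n");

	rte_mempool_free(mp);
	return 0;
}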