X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fmempool%2Fstack%2Frte_mempool_stack.c;h=7e85c8d6b6f8e0752686d6ad1b7d905e68783eff;hb=cee221f670440d3e2ee32f9a67344251f5dee1f6;hp=e6d504af54676a4f1a00d284ed884cb1340171d0;hpb=5566a3e35866ce9e5eacf886c27b460ebfcd6ee9;p=dpdk.git

diff --git a/drivers/mempool/stack/rte_mempool_stack.c b/drivers/mempool/stack/rte_mempool_stack.c
index e6d504af54..7e85c8d6b6 100644
--- a/drivers/mempool/stack/rte_mempool_stack.c
+++ b/drivers/mempool/stack/rte_mempool_stack.c
@@ -1,109 +1,78 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016 Intel Corporation
+ * Copyright(c) 2016-2019 Intel Corporation
  */
 
 #include <stdio.h>
 #include <rte_mempool.h>
-#include <rte_malloc.h>
-
-struct rte_mempool_stack {
-	rte_spinlock_t sl;
-
-	uint32_t size;
-	uint32_t len;
-	void *objs[];
-};
+#include <rte_stack.h>
 
 static int
-stack_alloc(struct rte_mempool *mp)
+__stack_alloc(struct rte_mempool *mp, uint32_t flags)
 {
-	struct rte_mempool_stack *s;
-	unsigned n = mp->size;
-	int size = sizeof(*s) + (n+16)*sizeof(void *);
-
-	/* Allocate our local memory structure */
-	s = rte_zmalloc_socket("mempool-stack",
-			size,
-			RTE_CACHE_LINE_SIZE,
-			mp->socket_id);
-	if (s == NULL) {
-		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
-		return -ENOMEM;
+	char name[RTE_STACK_NAMESIZE];
+	struct rte_stack *s;
+	int ret;
+
+	ret = snprintf(name, sizeof(name),
+		       RTE_MEMPOOL_MZ_FORMAT, mp->name);
+	if (ret < 0 || ret >= (int)sizeof(name)) {
+		rte_errno = ENAMETOOLONG;
+		return -rte_errno;
 	}
 
-	rte_spinlock_init(&s->sl);
+	s = rte_stack_create(name, mp->size, mp->socket_id, flags);
+	if (s == NULL)
+		return -rte_errno;
 
-	s->size = n;
 	mp->pool_data = s;
 
 	return 0;
 }
 
 static int
-stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
-		unsigned n)
+stack_alloc(struct rte_mempool *mp)
 {
-	struct rte_mempool_stack *s = mp->pool_data;
-	void **cache_objs;
-	unsigned index;
-
-	rte_spinlock_lock(&s->sl);
-	cache_objs = &s->objs[s->len];
-
-	/* Is there sufficient space in the stack ? */
-	if ((s->len + n) > s->size) {
-		rte_spinlock_unlock(&s->sl);
-		return -ENOBUFS;
-	}
+	return __stack_alloc(mp, 0);
+}
 
-	/* Add elements back into the cache */
-	for (index = 0; index < n; ++index, obj_table++)
-		cache_objs[index] = *obj_table;
+static int
+lf_stack_alloc(struct rte_mempool *mp)
+{
+	return __stack_alloc(mp, RTE_STACK_F_LF);
+}
 
-	s->len += n;
+static int
+stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
+		unsigned int n)
+{
+	struct rte_stack *s = mp->pool_data;
 
-	rte_spinlock_unlock(&s->sl);
-	return 0;
+	return rte_stack_push(s, obj_table, n) == 0 ? -ENOBUFS : 0;
 }
 
 static int
 stack_dequeue(struct rte_mempool *mp, void **obj_table,
-		unsigned n)
+		unsigned int n)
 {
-	struct rte_mempool_stack *s = mp->pool_data;
-	void **cache_objs;
-	unsigned index, len;
-
-	rte_spinlock_lock(&s->sl);
-
-	if (unlikely(n > s->len)) {
-		rte_spinlock_unlock(&s->sl);
-		return -ENOENT;
-	}
-
-	cache_objs = s->objs;
+	struct rte_stack *s = mp->pool_data;
 
-	for (index = 0, len = s->len - 1; index < n;
-			++index, len--, obj_table++)
-		*obj_table = cache_objs[len];
-
-	s->len -= n;
-	rte_spinlock_unlock(&s->sl);
-	return 0;
+	return rte_stack_pop(s, obj_table, n) == 0 ? -ENOBUFS : 0;
 }
 
 static unsigned
 stack_get_count(const struct rte_mempool *mp)
 {
-	struct rte_mempool_stack *s = mp->pool_data;
+	struct rte_stack *s = mp->pool_data;
 
-	return s->len;
+	return rte_stack_count(s);
 }
 
 static void
 stack_free(struct rte_mempool *mp)
 {
-	rte_free((void *)(mp->pool_data));
+	struct rte_stack *s = mp->pool_data;
+
+	rte_stack_free(s);
 }
 
 static struct rte_mempool_ops ops_stack = {
@@ -115,4 +84,14 @@ static struct rte_mempool_ops ops_stack = {
 	.get_count = stack_get_count
 };
 
+static struct rte_mempool_ops ops_lf_stack = {
+	.name = "lf_stack",
+	.alloc = lf_stack_alloc,
+	.free = stack_free,
+	.enqueue = stack_enqueue,
+	.dequeue = stack_dequeue,
+	.get_count = stack_get_count
+};
+
 MEMPOOL_REGISTER_OPS(ops_stack);
+MEMPOOL_REGISTER_OPS(ops_lf_stack);
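
For context, not part of the patch above: an application selects the newly registered "lf_stack" handler by name, the same way it would select the existing "stack" handler. Below is a minimal sketch under that assumption; the helper name, pool dimensions, and element size are placeholder values for illustration, and EAL is assumed to be initialized already.

/* Hypothetical helper (illustration only): create a mempool backed by the
 * lock-free stack ops registered by this driver.
 */
#include <rte_mempool.h>

#define NUM_OBJS 4096	/* placeholder: number of elements in the pool */
#define OBJ_SIZE 2048	/* placeholder: size of each element in bytes */

static struct rte_mempool *
create_lf_stack_pool(const char *name, int socket_id)
{
	struct rte_mempool *mp;

	/* Create an empty pool first; cache size 0 keeps every get/put on
	 * the shared stack so the handler itself is exercised.
	 */
	mp = rte_mempool_create_empty(name, NUM_OBJS, OBJ_SIZE,
				      0, 0, socket_id, 0);
	if (mp == NULL)
		return NULL;

	/* Bind the pool to the lock-free stack handler before populating. */
	if (rte_mempool_set_ops_byname(mp, "lf_stack", NULL) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}

	if (rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}

	return mp;
}

The ops must be chosen between rte_mempool_create_empty() and rte_mempool_populate_default(), since stack_alloc()/lf_stack_alloc() above run when the ops are bound and populate fills the backing store they create.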