From 30e6399892276f3940bbd7c29e93f1017e4d8168 Mon Sep 17 00:00:00 2001
From: Cunming Liang
Date: Tue, 17 Feb 2015 10:08:13 +0800
Subject: [PATCH] mempool: support non-EAL thread

For a non-EAL thread, bypass the per-lcore cache and use the ring pool
directly. This allows rte_mempool to be used from either an EAL thread
or any user pthread.

Since a non-EAL thread relies directly on rte_ring, which is
non-preemptible, running multiple pthreads per CPU that compete for the
same rte_mempool is not recommended: performance will be poor, and
there is a critical risk of lockup if the scheduling policy is RT.

No significant performance decrease was found with mempool_perf_test.

Signed-off-by: Cunming Liang
Acked-by: Olivier Matz
Acked-by: Konstantin Ananyev
---
 lib/librte_mempool/rte_mempool.h | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 48e9972264..974e8d748f 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -198,10 +198,12 @@ struct rte_mempool {
  * Number to add to the object-oriented statistics.
  */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-#define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
-		unsigned __lcore_id = rte_lcore_id();		\
-		mp->stats[__lcore_id].name##_objs += n;		\
-		mp->stats[__lcore_id].name##_bulk += 1;		\
+#define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
+		unsigned __lcore_id = rte_lcore_id();		\
+		if (__lcore_id < RTE_MAX_LCORE) {		\
+			mp->stats[__lcore_id].name##_objs += n;	\
+			mp->stats[__lcore_id].name##_bulk += 1;	\
+		}						\
 	} while(0)
 #else
 #define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
@@ -767,8 +769,9 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 	__MEMPOOL_STAT_ADD(mp, put, n);
 
 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
-	/* cache is not enabled or single producer */
-	if (unlikely(cache_size == 0 || is_mp == 0))
+	/* cache is not enabled or single producer or non-EAL thread */
+	if (unlikely(cache_size == 0 || is_mp == 0 ||
+		     lcore_id >= RTE_MAX_LCORE))
 		goto ring_enqueue;
 
 	/* Go straight to ring if put would overflow mem allocated for cache */
@@ -952,7 +955,8 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
 	uint32_t cache_size = mp->cache_size;
 
 	/* cache is not enabled or single consumer */
-	if (unlikely(cache_size == 0 || is_mc == 0 || n >= cache_size))
+	if (unlikely(cache_size == 0 || is_mc == 0 ||
+		     n >= cache_size || lcore_id >= RTE_MAX_LCORE))
 		goto ring_dequeue;
 
 	cache = &mp->local_cache[lcore_id];
-- 
2.20.1
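
A minimal usage sketch (not part of the patch; the names user_thread and
mp are illustrative): an rte_mempool created by an EAL thread is handed
to a plain pthread. In such a non-EAL thread, rte_lcore_id() does not
return a valid lcore index (it is >= RTE_MAX_LCORE), so with this patch
the get/put fast paths skip the per-lcore cache and operate directly on
the underlying rte_ring:

    #include <pthread.h>
    #include <rte_lcore.h>
    #include <rte_mempool.h>

    /* Entry point for a user pthread created with pthread_create();
     * "arg" is an rte_mempool set up elsewhere, e.g. by
     * rte_mempool_create() on an EAL thread. */
    static void *
    user_thread(void *arg)
    {
            struct rte_mempool *mp = arg;
            void *obj;

            /* rte_lcore_id() is not a valid lcore index here, so both
             * calls below bypass mp->local_cache[] and enqueue/dequeue
             * on the ring directly. */
            if (rte_mempool_get(mp, &obj) == 0) {
                    /* ... use obj ... */
                    rte_mempool_put(mp, obj);
            }
            return NULL;
    }

Note that the pool should keep the default multi-producer/multi-consumer
ring behaviour (no MEMPOOL_F_SP_PUT / MEMPOOL_F_SC_GET flags) when
several threads get/put concurrently, since the bypass makes every
non-EAL thread a direct ring producer and consumer.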