EAL_REGISTER_TAILQ(rte_mempool_tailq)
#define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
+#define CALC_CACHE_FLUSHTHRESH(c) \
+	((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))
/*
* return the greatest common divisor between a and b (fast algorithm)
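As an aside, the new macro computes the flush threshold as 1.5 times the cache size and casts the floating-point product back to the integer type of its argument via typeof, so the result is truncated. A minimal standalone sketch, not part of the patch, assuming a GCC/Clang toolchain that provides typeof:

/* Illustration only -- not part of the patch. Shows how the added
 * CALC_CACHE_FLUSHTHRESH() macro truncates the 1.5x product back to
 * the integer type of its argument (requires typeof, i.e. GCC/Clang). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
#define CALC_CACHE_FLUSHTHRESH(c) \
	((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))

int main(void)
{
	uint32_t cache_size = 101;

	/* 101 * 1.5 = 151.5, truncated to 151 by the cast back to uint32_t */
	printf("flushthresh = %" PRIu32 "\n",
	       CALC_CACHE_FLUSHTHRESH(cache_size));
	return 0;
}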
	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
	/* asked cache too big */
-	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
+	    CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
		rte_errno = EINVAL;
		return NULL;
	}
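To illustrate what the strengthened check now rejects, here is a standalone sketch that mirrors the condition above; cache_size_is_valid() is a hypothetical helper, not a DPDK API, and RTE_MEMPOOL_CACHE_MAX_SIZE is assumed to take its default value of 512:

/* Sketch only -- mirrors the condition added above using local copies
 * of the constants. */
#include <stdint.h>
#include <stdio.h>

#define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
#define CALC_CACHE_FLUSHTHRESH(c) \
	((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))
/* Assumed default of CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. */
#define RTE_MEMPOOL_CACHE_MAX_SIZE 512

static int cache_size_is_valid(uint32_t cache_size, uint32_t n)
{
	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
	    CALC_CACHE_FLUSHTHRESH(cache_size) > n)
		return 0; /* rte_mempool_create() sets rte_errno = EINVAL */
	return 1;
}

int main(void)
{
	/* 512 * 1.5 = 768 > 600: rejected. */
	printf("cache=512 n=600  -> %s\n",
	       cache_size_is_valid(512, 600) ? "ok" : "EINVAL");
	/* 512 * 1.5 = 768 <= 1024: accepted. */
	printf("cache=512 n=1024 -> %s\n",
	       cache_size_is_valid(512, 1024) ? "ok" : "EINVAL");
	return 0;
}

Before this change only the first half of the condition was evaluated, so the first case above would have been accepted even though the flush threshold exceeds the number of objects in the pool.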
	mp->header_size = objsz.header_size;
	mp->trailer_size = objsz.trailer_size;
	mp->cache_size = cache_size;
-	mp->cache_flushthresh = (uint32_t)
-		(cache_size * CACHE_FLUSHTHRESH_MULTIPLIER);
+	mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size);
	mp->private_data_size = private_data_size;
	/* calculate address of the first element for continuous mempool. */
* If cache_size is non-zero, the rte_mempool library will try to
* limit the accesses to the common lockless pool, by maintaining a
* per-lcore object cache. This argument must be lower or equal to
- * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
+ * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
* cache_size to have "n modulo cache_size == 0": if this is
* not the case, some elements will always stay in the pool and will
* never be used. The access to the per-lcore table is of course