EAL_REGISTER_TAILQ(rte_mempool_tailq)
#define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
+#define CALC_CACHE_FLUSHTHRESH(c) \
+ ((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))
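For reference, a minimal standalone sketch (not part of the patch) of what the new macro does: the multiply is performed in double because of the 1.5 constant, and the typeof cast (a GCC extension DPDK already relies on) truncates the result back to the argument's own type.

#include <stdint.h>
#include <stdio.h>

#define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
#define CALC_CACHE_FLUSHTHRESH(c) \
	((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))

int main(void)
{
	uint32_t cache_size = 512;

	/* 512 * 1.5 = 768.0; the cast back to uint32_t gives 768 */
	printf("flushthresh = %u\n", CALC_CACHE_FLUSHTHRESH(cache_size));
	return 0;
}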
/*
 * return the greatest common divisor between a and b (fast algorithm)
 */
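The gcd body itself is not part of this excerpt; for context, a sketch of the classic Euclid loop such a comment usually describes (not necessarily the exact upstream body):

static unsigned int
gcd(unsigned int a, unsigned int b)
{
	/* replace (a, b) by (b, a mod b) until b reaches zero;
	 * the survivor is the greatest common divisor. */
	while (b != 0) {
		unsigned int r = a % b;
		a = b;
		b = r;
	}
	return a;
}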
rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg)
{
uint32_t i, j, k;
- uint32_t pgn;
+ uint32_t pgn, pgf;
uintptr_t end, start, va;
uintptr_t pg_sz;
start = RTE_ALIGN_CEIL(va, align);
end = start + elt_sz;
- pgn = (end >> pg_shift) - (start >> pg_shift);
+ /* first page of the next element, relative to start's page. */
+ pgf = (end >> pg_shift) - (start >> pg_shift);
+
+ /* last page of the current element, relative to start's page. */
+ pgn = ((end - 1) >> pg_shift) - (start >> pg_shift);
pgn += j;
- /* do we have enough space left for the next element. */
+ /* do we have enough space left for this element? */
if (pgn >= pg_num)
break;
obj_iter(obj_iter_arg, (void *)start,
(void *)end, i);
va = end;
- j = pgn;
+ j += pgf;
i++;
} else {
va = RTE_ALIGN_CEIL((va + 1), pg_sz);
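To make the off-by-one concrete: when an element ends exactly on a page boundary, the old expression charged it to the following page as well, so the pgn >= pg_num check could reject a layout that actually fits. A sketch with hypothetical values (4 KB pages):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical element occupying exactly one 4 KB page */
	uintptr_t start = 0x0000, end = 0x1000;
	unsigned pg_shift = 12;

	/* old: page of 'end', i.e. the first page of the NEXT element */
	printf("old pgn = %u\n",
	       (unsigned)((end >> pg_shift) - (start >> pg_shift)));       /* 1 */
	/* new: page of the element's last byte */
	printf("new pgn = %u\n",
	       (unsigned)(((end - 1) >> pg_shift) - (start >> pg_shift))); /* 0 */
	return 0;
}

Splitting the two notions also fixes the page cursor: the next element starts at end, so j must advance by pgf (the page of end), while the bounds check needs pgn (the page of the element's last byte).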
if ((n = rte_mempool_obj_iter(vaddr, elt_num, elt_sz, 1,
paddr, pg_num, pg_shift, mempool_lelem_iter,
&uv)) != elt_num) {
- return (-n);
+ return (-(ssize_t)n);
}
uv = RTE_ALIGN_CEIL(uv, pg_sz);
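The cast in the return fix matters on 64-bit targets: n is uint32_t, and unary minus on an unsigned value wraps modulo 2^32, so the old return (-n) handed the ssize_t caller a huge positive count instead of a negative error. A sketch (assuming LP64, where ssize_t is 64-bit):

#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	uint32_t n = 5;

	ssize_t bad = -n;           /* -5 wraps to 4294967291 before widening */
	ssize_t good = -(ssize_t)n; /* widen first, then negate: -5 */
	printf("bad = %zd, good = %zd\n", bad, good);
	return 0;
}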
mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
- /* asked cache too big */
+ /* asked cache too big, or flush threshold exceeds the pool size */
- if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+ if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
+ CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
rte_errno = EINVAL;
return NULL;
}
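The second condition is new: if the flush threshold were allowed to exceed n, a single lcore's cache could legally hold more objects than exist in the whole pool and would never flush them back, starving the other lcores. A sketch with hypothetical numbers showing which cache sizes pass the check:

#include <stdint.h>
#include <stdio.h>

#define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
#define CALC_CACHE_FLUSHTHRESH(c) \
	((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))

int main(void)
{
	uint32_t n = 1000;       /* objects in the pool (hypothetical) */
	uint32_t ok_cache = 512; /* threshold 768 <= 1000: accepted */
	uint32_t bad_cache = 700;/* threshold 1050 > 1000: EINVAL */

	printf("pool size n = %u\n", n);
	printf("cache %u -> flushthresh %u (accepted)\n",
	       ok_cache, CALC_CACHE_FLUSHTHRESH(ok_cache));
	printf("cache %u -> flushthresh %u (rejected)\n",
	       bad_cache, CALC_CACHE_FLUSHTHRESH(bad_cache));
	return 0;
}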
mp->header_size = objsz.header_size;
mp->trailer_size = objsz.trailer_size;
mp->cache_size = cache_size;
- mp->cache_flushthresh = (uint32_t)
- (cache_size * CACHE_FLUSHTHRESH_MULTIPLIER);
+ mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size);
mp->private_data_size = private_data_size;
/* calculate address of the first element for continuous mempool. */