#define CALC_CACHE_FLUSHTHRESH(c) \
((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))
+#if defined(RTE_ARCH_X86)
/*
* return the greatest common divisor between a and b (fast algorithm)
*
}
/*
- * Depending on memory configuration, objects addresses are spread
+ * Depending on the memory configuration on x86, object addresses are spread
* between channels and ranks in RAM: the pool allocator will add
- * padding between objects. This function return the new size of the
- * object.
+ * padding between objects. This function returns the new size of the
+ * object.
*/
-static unsigned optimize_object_size(unsigned obj_size)
+static unsigned int
+arch_mem_object_align(unsigned int obj_size)
{
unsigned nrank, nchan;
unsigned new_obj_size;
new_obj_size++;
return new_obj_size * RTE_MEMPOOL_ALIGN;
}
+#else
+static unsigned int
+arch_mem_object_align(unsigned int obj_size)
+{
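+	/* x86-specific channel/rank spreading does not apply here:
+	 * return the object size unmodified
+	 */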
+ return obj_size;
+}
+#endif
struct pagesz_walk_arg {
int socket_id;
*/
if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
unsigned new_size;
- new_size = optimize_object_size(sz->header_size + sz->elt_size +
- sz->trailer_size);
+		new_size = arch_mem_object_align(sz->header_size +
+			sz->elt_size + sz->trailer_size);
sz->trailer_size = new_size - sz->header_size - sz->elt_size;
}
* zone. Return the number of objects added, or a negative value
* on error.
+ * Return 0 when there is not enough room to store one object.
*/
-int
-rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+static int
+__rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque)
{
off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_MEMPOOL_ALIGN) - vaddr;
if (off > len) {
- ret = -EINVAL;
+ ret = 0;
goto fail;
}
/* not enough room to store one object */
if (i == 0) {
- ret = -EINVAL;
+ ret = 0;
goto fail;
}
return ret;
}
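+/* Public wrapper: the exported API keeps reporting -EINVAL when the
+ * area cannot store even one object, while the static helper returns 0
+ * so that callers populating chunk by chunk can skip such areas.
+ */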
+int
+rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ int ret;
+
+ ret = __rte_mempool_populate_iova(mp, vaddr, iova, len, free_cb,
+ opaque);
+ if (ret == 0)
+ ret = -EINVAL;
+
+ return ret;
+}
+
static rte_iova_t
get_iova(void *addr)
{
break;
}
- ret = rte_mempool_populate_iova(mp, addr + off, iova,
+ ret = __rte_mempool_populate_iova(mp, addr + off, iova,
phys_len, free_cb, opaque);
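+		/* an area too small for one object is not an error when
+		 * populating by chunks: skip it and try the next area
+		 */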
+ if (ret == 0)
+ continue;
if (ret < 0)
goto fail;
/* no need to call the free callback for next chunks */
cnt += ret;
}
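+	/* fail only if no chunk at all could store an object */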
+ if (cnt == 0)
+ return -EINVAL;
+
return cnt;
fail:
unsigned mz_id, n;
int ret;
bool need_iova_contig_obj;
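+	/* largest zone size to request; halved on each ENOMEM retry */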
+ size_t max_alloc_size = SIZE_MAX;
ret = mempool_ops_alloc_once(mp);
if (ret != 0)
if (min_chunk_size == (size_t)mem_size)
mz_flags |= RTE_MEMZONE_IOVA_CONTIG;
- mz = rte_memzone_reserve_aligned(mz_name, mem_size,
+ /* Allocate a memzone, retrying with a smaller area on ENOMEM */
+ do {
+ mz = rte_memzone_reserve_aligned(mz_name,
+ RTE_MIN((size_t)mem_size, max_alloc_size),
mp->socket_id, mz_flags, align);
- /* don't try reserving with 0 size if we were asked to reserve
- * IOVA-contiguous memory.
- */
- if (min_chunk_size < (size_t)mem_size && mz == NULL) {
- /* not enough memory, retry with the biggest zone we
- * have
- */
- mz = rte_memzone_reserve_aligned(mz_name, 0,
- mp->socket_id, mz_flags, align);
- }
+ if (mz == NULL && rte_errno != ENOMEM)
+ break;
+
+ max_alloc_size = RTE_MIN(max_alloc_size,
+ (size_t)mem_size) / 2;
+ } while (mz == NULL && max_alloc_size >= min_chunk_size);
+
if (mz == NULL) {
ret = -rte_errno;
goto fail;
}
- if (mz->len < min_chunk_size) {
- rte_memzone_free(mz);
- ret = -ENOMEM;
- goto fail;
- }
-
if (need_iova_contig_obj)
iova = mz->iova;
else