if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
else
- off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;
+ off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_MEMPOOL_ALIGN) - vaddr;
if (off > len) {
ret = -EINVAL;
		goto fail;
}
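A worked example of the offset computation above, with a made-up address and assuming a 64-byte cache line (so RTE_MEMPOOL_ALIGN == 64):

	vaddr                          = 0x7f0004
	RTE_PTR_ALIGN_CEIL(vaddr, 64)  = 0x7f0040
	off = 0x7f0040 - 0x7f0004      = 0x3c (60 bytes skipped)

Only [vaddr + off, vaddr + len) is then carved into objects, which is why off > len is rejected as -EINVAL.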
+static rte_iova_t
+get_iova(void *addr)
+{
+ struct rte_memseg *ms;
+
+ /* try registered memory first */
+ ms = rte_mem_virt2memseg(addr, NULL);
+ if (ms == NULL || ms->iova == RTE_BAD_IOVA)
+ /* fall back to actual physical address */
+ return rte_mem_virt2iova(addr);
+ return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
+}
+
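For illustration, a minimal sketch of this helper in use; walk_iovas() and its parameters are hypothetical, only the get_iova() logic above is from this patch:

	#include <stdio.h>
	#include <inttypes.h>
	#include <rte_memory.h>

	/* Hypothetical: print the IO address of each page of a buffer.
	 * Registered memory resolves through its memseg; anything else
	 * falls back to rte_mem_virt2iova(). */
	static void
	walk_iovas(char *addr, size_t len, size_t pg_sz)
	{
		size_t off;
		rte_iova_t iova;

		for (off = 0; off < len; off += pg_sz) {
			iova = get_iova(addr + off);
			if (iova == RTE_BAD_IOVA)
				printf("%p: no IO address\n", addr + off);
			else
				printf("%p: iova 0x%" PRIx64 "\n",
					addr + off, (uint64_t)iova);
		}
	}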
/* Populate the mempool with a virtual area. Return the number of
* objects added, or a negative value on error.
*/
for (off = 0; off < len &&
mp->populated_size < mp->size; off += phys_len) {
- iova = rte_mem_virt2iova(addr + off);
-
- if (iova == RTE_BAD_IOVA && rte_eal_has_hugepages()) {
- ret = -EINVAL;
- goto fail;
- }
+ iova = get_iova(addr + off);
/* populate with the largest group of contiguous pages */
		for (phys_len = RTE_MIN(
			(size_t)(RTE_PTR_ALIGN_CEIL(addr + off + 1, pg_sz) -
				(addr + off)),
			len - off);
		     off + phys_len < len;
		     phys_len = RTE_MIN(phys_len + pg_sz, len - off)) {
rte_iova_t iova_tmp;
- iova_tmp = rte_mem_virt2iova(addr + off + phys_len);
+ iova_tmp = get_iova(addr + off + phys_len);
if (iova_tmp == RTE_BAD_IOVA ||
iova_tmp != iova + phys_len)
				break;
}
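The effect of the loop above, with made-up IOVAs and 4 KiB pages (assuming addr + off is page-aligned, so phys_len starts at pg_sz):

	get_iova(addr + off)           -> 0x1000   start of the run
	get_iova(addr + off + 0x1000)  -> 0x2000   contiguous, extend
	get_iova(addr + off + 0x2000)  -> 0x7000   gap: stop the scan

The chunk [off, off + 0x2000) is then populated as one IOVA-contiguous area, and the outer loop resumes at off += phys_len.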
+/* Get the minimal page size used in a mempool before populating it. */
+int
+rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
+{
+ bool need_iova_contig_obj;
+ bool alloc_in_ext_mem;
+ int ret;
+
+ /* check if we can retrieve a valid socket ID */
+ ret = rte_malloc_heap_socket_is_external(mp->socket_id);
+ if (ret < 0)
+ return -EINVAL;
+ alloc_in_ext_mem = (ret == 1);
+ need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+
+ if (!need_iova_contig_obj)
+ *pg_sz = 0;
+ else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
+ *pg_sz = get_min_page_size(mp->socket_id);
+ else
+ *pg_sz = getpagesize();
+
+ return 0;
+}
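A minimal sketch of the intended call pattern, mirroring what rte_mempool_populate_default() does below (error handling trimmed):

	size_t pg_sz;
	size_t pg_shift = 0;
	int ret;

	ret = rte_mempool_get_page_size(mp, &pg_sz);
	if (ret < 0)
		return ret;

	/* pg_sz == 0 means no page-boundary constraint applies */
	if (pg_sz != 0)
		pg_shift = rte_bsf32(pg_sz);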
+
/* Default function to populate the mempool: allocate memory in memzones,
* and populate them. Return the number of objects added, or a negative
 * value on error.
 */
char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
ssize_t mem_size;
- size_t align, pg_sz, pg_shift;
+ size_t align, pg_sz, pg_shift = 0;
rte_iova_t iova;
unsigned mz_id, n;
int ret;
bool need_iova_contig_obj;
- bool alloc_in_ext_mem;
+ size_t max_alloc_size = SIZE_MAX;
ret = mempool_ops_alloc_once(mp);
if (ret != 0)
* then just set page shift and page size to 0, because the user has
* indicated that there's no need to care about anything.
*
- * if we do need contiguous objects, there is also an option to reserve
- * the entire mempool memory as one contiguous block of memory, in
- * which case the page shift and alignment wouldn't matter as well.
+ * if we do need contiguous objects (if a mempool driver has its
+ * own calc_size() method returning min_chunk_size = mem_size),
+ * there is also an option to reserve the entire mempool memory
+ * as one contiguous block of memory.
*
* if we require contiguous objects, but not necessarily the entire
- * mempool reserved space to be contiguous, then there are two options.
- *
- * if our IO addresses are virtual, not actual physical (IOVA as VA
- * case), then no page shift needed - our memory allocation will give us
- * contiguous IO memory as far as the hardware is concerned, so
- * act as if we're getting contiguous memory.
+ * mempool reserved space to be contiguous, pg_sz will be != 0,
+ * and the default ops->populate() will take care of not placing
+ * objects across pages.
*
* if our IO addresses are physical, we may get memory from bigger
* pages, or we might get memory from smaller pages, and how much of it
*
* If we fail to get enough contiguous memory, then we'll go and
* reserve space in smaller chunks.
- *
- * We also have to take into account the fact that memory that we're
- * going to allocate from can belong to an externally allocated memory
- * area, in which case the assumption of IOVA as VA mode being
- * synonymous with IOVA contiguousness will not hold.
*/
- /* check if we can retrieve a valid socket ID */
- ret = rte_malloc_heap_socket_is_external(mp->socket_id);
- if (ret < 0)
- return -EINVAL;
- alloc_in_ext_mem = (ret == 1);
need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+ ret = rte_mempool_get_page_size(mp, &pg_sz);
+ if (ret < 0)
+ return ret;
- if (!need_iova_contig_obj) {
- pg_sz = 0;
- pg_shift = 0;
- } else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA) {
- pg_sz = 0;
- pg_shift = 0;
- } else if (rte_eal_has_hugepages() || alloc_in_ext_mem) {
- pg_sz = get_min_page_size(mp->socket_id);
- pg_shift = rte_bsf32(pg_sz);
- } else {
- pg_sz = getpagesize();
+ if (pg_sz != 0)
pg_shift = rte_bsf32(pg_sz);
- }
for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
size_t min_chunk_size;
if (min_chunk_size == (size_t)mem_size)
mz_flags |= RTE_MEMZONE_IOVA_CONTIG;
- mz = rte_memzone_reserve_aligned(mz_name, mem_size,
+ /* Allocate a memzone, retrying with a smaller area on ENOMEM */
+ do {
+ mz = rte_memzone_reserve_aligned(mz_name,
+ RTE_MIN((size_t)mem_size, max_alloc_size),
mp->socket_id, mz_flags, align);
- /* don't try reserving with 0 size if we were asked to reserve
- * IOVA-contiguous memory.
- */
- if (min_chunk_size < (size_t)mem_size && mz == NULL) {
- /* not enough memory, retry with the biggest zone we
- * have
- */
- mz = rte_memzone_reserve_aligned(mz_name, 0,
- mp->socket_id, mz_flags, align);
- }
+ if (mz == NULL && rte_errno != ENOMEM)
+ break;
+
+ max_alloc_size = RTE_MIN(max_alloc_size,
+ (size_t)mem_size) / 2;
+ } while (mz == NULL && max_alloc_size >= min_chunk_size);
+
if (mz == NULL) {
ret = -rte_errno;
goto fail;
}
- if (mz->len < min_chunk_size) {
- rte_memzone_free(mz);
- ret = -ENOMEM;
- goto fail;
- }
-
if (need_iova_contig_obj)
iova = mz->iova;
else
}
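With illustrative numbers, the reservation loop above degrades gracefully under memory fragmentation: for mem_size = 64 MiB and min_chunk_size = 2 MiB it tries

	64 MiB -> ENOMEM, 32 MiB -> ENOMEM, ..., 2 MiB -> last attempt

i.e. at most log2(mem_size / min_chunk_size) + 1 reservations, since max_alloc_size halves each round and the loop stops once it would drop below min_chunk_size. Any zone that is obtained is populated as-is, and the outer loop keeps allocating further memzones until mp->size objects fit.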
ret = mempool_ops_alloc_once(mp);
- if (ret != 0)
- return ret;
+ if (ret < 0) {
+ rte_errno = -ret;
+ return 0;
+ }
size = get_anon_size(mp);
if (size < 0) {
ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(),
rte_mempool_memchunk_anon_free, addr);
- if (ret == 0)
+ if (ret < 0) {
+ rte_errno = -ret;
goto fail;
+ }
return mp->populated_size;
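Given the new convention (0 with rte_errno set on failure, the object count on success), a caller would now check the result as sketched here; the wrapper name is hypothetical:

	#include <stdio.h>
	#include <rte_errno.h>
	#include <rte_mempool.h>

	/* Hypothetical wrapper: convert the 0-plus-rte_errno convention
	 * back into a negative errno for callers that prefer it. */
	static int
	populate_anon_or_report(struct rte_mempool *mp)
	{
		int n;

		n = rte_mempool_populate_anon(mp);
		if (n == 0) {
			printf("populate failed: %s\n",
				rte_strerror(rte_errno));
			return -rte_errno;
		}
		return n; /* number of objects populated */
	}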