mempool: allow populating with unaligned virtual area
author     Olivier Matz <olivier.matz@6wind.com>
           Tue, 5 Nov 2019 15:37:00 +0000 (16:37 +0100)
committer  Thomas Monjalon <thomas@monjalon.net>
           Wed, 6 Nov 2019 10:11:09 +0000 (11:11 +0100)
rte_mempool_populate_virt() currently requires that both addr
and length are page-aligned.

Remove this unneeded constraint, which can be annoying with big
hugepages (e.g. 1 GB).

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: Nipun Gupta <nipun.gupta@nxp.com>
lib/librte_mempool/rte_mempool.c
lib/librte_mempool/rte_mempool.h

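Before the diff, a minimal usage sketch of what the relaxed API now accepts: the no-op free callback, the externally mapped 1 GB hugepage (va/va_len) and the populate_unaligned() helper are hypothetical, only rte_mempool_populate_virt(), RTE_PGSIZE_1G and the mempool types come from DPDK.

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_mempool.h>

/* Hypothetical no-op free callback: the caller keeps ownership of the
 * externally mapped hugepage handed to the mempool.
 */
static void
extmem_free_cb(struct rte_mempool_memhdr *memhdr __rte_unused,
               void *opaque __rte_unused)
{
}

/* Populate an existing empty mempool from an arbitrary window inside a
 * 1 GB hugepage mapping: neither the start address nor the length has
 * to be page-aligned anymore.
 */
static int
populate_unaligned(struct rte_mempool *mp, char *va, size_t va_len)
{
        char *addr = va + 0x1200;            /* not 1 GB-aligned */
        size_t len = va_len - 0x1200 - 0x80; /* not a multiple of pg_sz */

        return rte_mempool_populate_virt(mp, addr, len, RTE_PGSIZE_1G,
                                         extmem_free_cb, NULL);
}
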
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 0f29e87..88e49c7 100644
@@ -368,17 +368,11 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
        size_t off, phys_len;
        int ret, cnt = 0;
 
-       /* address and len must be page-aligned */
-       if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr)
-               return -EINVAL;
-       if (RTE_ALIGN_CEIL(len, pg_sz) != len)
-               return -EINVAL;
-
        if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
                return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA,
                        len, free_cb, opaque);
 
-       for (off = 0; off + pg_sz <= len &&
+       for (off = 0; off < len &&
                     mp->populated_size < mp->size; off += phys_len) {
 
                iova = rte_mem_virt2iova(addr + off);
@@ -389,12 +383,18 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
                }
 
                /* populate with the largest group of contiguous pages */
-               for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
+               for (phys_len = RTE_MIN(
+                       (size_t)(RTE_PTR_ALIGN_CEIL(addr + off + 1, pg_sz) -
+                               (addr + off)),
+                       len - off);
+                    off + phys_len < len;
+                    phys_len = RTE_MIN(phys_len + pg_sz, len - off)) {
                        rte_iova_t iova_tmp;
 
                        iova_tmp = rte_mem_virt2iova(addr + off + phys_len);
 
-                       if (iova_tmp != iova + phys_len)
+                       if (iova_tmp == RTE_BAD_IOVA ||
+                                       iova_tmp != iova + phys_len)
                                break;
                }
 
@@ -575,8 +575,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
                         * have
                         */
                        mz = rte_memzone_reserve_aligned(mz_name, 0,
-                                       mp->socket_id, flags,
-                                       RTE_MAX(pg_sz, align));
+                                       mp->socket_id, flags, align);
                }
                if (mz == NULL) {
                        ret = -rte_errno;
@@ -601,7 +600,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
                                (void *)(uintptr_t)mz);
                else
                        ret = rte_mempool_populate_virt(mp, mz->addr,
-                               RTE_ALIGN_FLOOR(mz->len, pg_sz), pg_sz,
+                               mz->len, pg_sz,
                                rte_mempool_memchunk_mz_free,
                                (void *)(uintptr_t)mz);
                if (ret < 0) {
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 8053f7a..0fe8aa7 100644
@@ -1042,9 +1042,8 @@ int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
  *   A pointer to the mempool structure.
  * @param addr
  *   The virtual address of memory that should be used to store objects.
- *   Must be page-aligned.
  * @param len
- *   The length of memory in bytes. Must be page-aligned.
+ *   The length of memory in bytes.
  * @param pg_sz
  *   The size of memory pages in this virtual area.
  * @param free_cb