mem: provide thread-unsafe contig walk variant
authorAnatoly Burakov <anatoly.burakov@intel.com>
Tue, 12 Jun 2018 09:46:14 +0000 (10:46 +0100)
committerThomas Monjalon <thomas@monjalon.net>
Fri, 13 Jul 2018 09:20:06 +0000 (11:20 +0200)
Sometimes, user code needs to walk the memseg list while inside
a memory-related callback. Rather than making everyone copy around
the same iteration code and depend on DPDK internals, provide an
official way to do memseg_contig_walk() inside callbacks.

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
lib/librte_eal/common/eal_common_memory.c
lib/librte_eal/common/include/rte_memory.h
lib/librte_eal/rte_eal_version.map

index 30671ef..b69c829 100644 (file)
@@ -418,14 +418,11 @@ rte_mem_lock_page(const void *virt)
 }
 
 int __rte_experimental
-rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
+rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
 {
        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        int i, ms_idx, ret = 0;
 
-       /* do not allow allocations/frees/init while we iterate */
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
-
        for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
                struct rte_memseg_list *msl = &mcfg->memsegs[i];
                const struct rte_memseg *ms;
@@ -450,19 +447,26 @@ rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
                        len = n_segs * msl->page_sz;
 
                        ret = func(msl, ms, len, arg);
-                       if (ret < 0) {
-                               ret = -1;
-                               goto out;
-                       } else if (ret > 0) {
-                               ret = 1;
-                               goto out;
-                       }
+                       if (ret)
+                               return ret;
                        ms_idx = rte_fbarray_find_next_used(arr,
                                        ms_idx + n_segs);
                }
        }
-out:
+       return 0;
+}
+
+int __rte_experimental
+rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       int ret = 0;
+
+       /* do not allow allocations/frees/init while we iterate */
+       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
        rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+
        return ret;
 }
 
index aab9f6f..aeba38b 100644 (file)
@@ -263,6 +263,24 @@ rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg);
 int __rte_experimental
 rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg);
 
+/**
+ * Walk each VA-contiguous area without performing any locking.
+ *
+ * @note This function does not perform any locking, and is only safe to call
+ *       from within memory-related callback functions.
+ *
+ * @param func
+ *   Iterator function
+ * @param arg
+ *   Argument passed to iterator
+ * @return
+ *   0 if walked over the entire list
+ *   1 if stopped by the user
+ *   -1 if user function reported error
+ */
+int __rte_experimental
+rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg);
+
 /**
  * Dump the physical memory layout to a file.
  *
index 0557682..f01661f 100644 (file)
@@ -292,6 +292,7 @@ EXPERIMENTAL {
        rte_mem_virt2memseg;
        rte_mem_virt2memseg_list;
        rte_memseg_contig_walk;
+       rte_memseg_contig_walk_thread_unsafe;
        rte_memseg_list_walk;
        rte_memseg_walk;
        rte_mp_action_register;