}
int __rte_experimental
-rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
+rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
int i, ms_idx, ret = 0;
- /* do not allow allocations/frees/init while we iterate */
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
-
for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
struct rte_memseg_list *msl = &mcfg->memsegs[i];
const struct rte_memseg *ms;
len = n_segs * msl->page_sz;
ret = func(msl, ms, len, arg);
- if (ret < 0) {
- ret = -1;
- goto out;
- } else if (ret > 0) {
- ret = 1;
- goto out;
- }
+ if (ret)
+ return ret;
ms_idx = rte_fbarray_find_next_used(arr,
ms_idx + n_segs);
}
}
-out:
+ return 0;
+}
+
+int __rte_experimental
+rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int ret = 0;
+
+ /* do not allow allocations/frees/init while we iterate */
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+
return ret;
}
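
For context, a minimal sketch of how an ordinary caller might use the locking wrapper above. The callback name and the byte-count bookkeeping are hypothetical; rte_memseg_contig_walk() and the rte_memseg_contig_walk_t callback signature (msl, ms, len, arg) are the real APIs touched by this patch.

#include <stdio.h>

#include <rte_common.h>
#include <rte_memory.h>

/* Hypothetical callback: accumulate the total length of all
 * VA-contiguous areas into *arg.
 */
static int
sum_contig(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms __rte_unused,
		size_t len, void *arg)
{
	size_t *total = arg;

	*total += len;
	return 0; /* 0 continues the walk; >0 stops it, <0 reports an error */
}

static void
print_total_contig(void)
{
	size_t total = 0;

	/* The locking version takes the hotplug read lock for the duration
	 * of the walk, so allocations/frees cannot race with it.
	 */
	if (rte_memseg_contig_walk(sum_contig, &total) >= 0)
		printf("VA-contiguous memory: %zu bytes\n", total);
}
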
int __rte_experimental
rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg);
+/**
+ * Walk each VA-contiguous area without performing any locking.
+ *
+ * @note This function does not perform any locking, and is only safe to call
+ * from within memory-related callback functions, where the memory hotplug
+ * lock is already held.
+ *
+ * @param func
+ * Iterator function
+ * @param arg
+ * Argument passed to iterator
+ * @return
+ * 0 if walked over the entire list
+ * 1 if stopped by the user
+ * -1 if user function reported error
+ */
+int __rte_experimental
+rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg);
+
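
A hedged sketch of the use case that motivates the thread-unsafe variant: a memory event callback already runs under the memory hotplug lock, so calling the locking rte_memseg_contig_walk() from it would deadlock on that same lock. The callback names below are hypothetical; rte_mem_event_callback_register() and enum rte_mem_event are existing DPDK APIs, assuming the four-argument event callback signature that takes a user argument.

#include <stdio.h>

#include <rte_common.h>
#include <rte_memory.h>

/* Hypothetical walk callback: print each VA-contiguous area. */
static int
dump_contig(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, size_t len,
		void *arg __rte_unused)
{
	printf("contig area at %p, %zu bytes\n", ms->addr, len);
	return 0;
}

/* Hypothetical memory event callback. It is invoked with the memory
 * hotplug lock already held, so it must use the thread-unsafe walk.
 */
static void
mem_event_cb(enum rte_mem_event event_type __rte_unused,
		const void *addr __rte_unused, size_t len __rte_unused,
		void *arg __rte_unused)
{
	rte_memseg_contig_walk_thread_unsafe(dump_contig, NULL);
}

/* Registered once, e.g. at driver init:
 *
 *	rte_mem_event_callback_register("example-mem-event",
 *			mem_event_cb, NULL);
 */
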
/**
* Dump the physical memory layout to a file.
*