}
int __rte_experimental
-rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
+rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
int i, ret = 0;
- /* do not allow allocations/frees/init while we iterate */
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
-
for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
struct rte_memseg_list *msl = &mcfg->memsegs[i];

		if (msl->base_va == NULL)
			continue;

ret = func(msl, arg);
- if (ret < 0) {
- ret = -1;
- goto out;
- }
- if (ret > 0) {
- ret = 1;
- goto out;
- }
+ if (ret)
+ return ret;
}
-out:
+ return 0;
+}
+
+int __rte_experimental
+rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int ret = 0;
+
+ /* do not allow allocations/frees/init while we iterate */
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ ret = rte_memseg_list_walk_thread_unsafe(func, arg);
rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+
return ret;
}
int __rte_experimental
rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg);
+/**
+ * Walk each allocated memseg list without performing any locking.
+ *
+ * @note This function does not perform any locking, and is only safe to call
+ * from within memory-related callback functions.
+ *
+ * @param func
+ * Iterator function
+ * @param arg
+ * Argument passed to iterator
+ * @return
+ * 0 if walked over the entire list
+ * 1 if stopped by the user
+ * -1 if user function reported error
+ */
+int __rte_experimental
+rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg);
+
/**
* Dump the physical memory layout to a file.
*
return st.st_size;
}
-/* we cannot use rte_memseg_list_walk() here because we will be holding a
- * write lock whenever we enter every function in this file, however copying
- * the same iteration code everywhere is not ideal as well. so, use a lockless
- * copy of memseg list walk here.
- */
-static int
-memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
-{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- int i, ret = 0;
-
- for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
- struct rte_memseg_list *msl = &mcfg->memsegs[i];
-
- if (msl->base_va == NULL)
- continue;
-
- ret = func(msl, arg);
- if (ret < 0)
- return -1;
- if (ret > 0)
- return 1;
- }
- return 0;
-}
-
/* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
static int lock(int fd, int type)
{
wa.socket = socket;
wa.segs_allocated = 0;
- ret = memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
+ /* memalloc is locked, so it's safe to use thread-unsafe version */
+ ret = rte_memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
if (ret == 0) {
RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
__func__);
wa.ms = cur;
wa.hi = hi;
- walk_res = memseg_list_walk_thread_unsafe(free_seg_walk, &wa);
+ /* memalloc is locked, so it's safe to use thread-unsafe version
+ */
+ walk_res = rte_memseg_list_walk_thread_unsafe(free_seg_walk,
+ &wa);
if (walk_res == 1)
continue;
if (walk_res == 0)
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
return 0;
- if (memseg_list_walk_thread_unsafe(sync_walk, NULL))
+ /* memalloc is locked, so it's safe to call thread-unsafe version */
+ if (rte_memseg_list_walk_thread_unsafe(sync_walk, NULL))
return -1;
return 0;
}
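
For illustration only (not part of this patch), a minimal sketch of how the new
rte_memseg_list_walk_thread_unsafe() might be used from a memory-related
callback, where EAL already holds the hotplug lock and the locking
rte_memseg_list_walk() would deadlock. The iterator name count_msl_cb(), the
helper count_memseg_lists() and the counting logic are hypothetical; only the
walk API and its return convention (0 to continue, >0 to stop, <0 on error)
come from the code above.

	#include <rte_common.h>
	#include <rte_log.h>
	#include <rte_memory.h>

	/* hypothetical iterator; its signature follows rte_memseg_list_walk_t */
	static int
	count_msl_cb(const struct rte_memseg_list *msl, void *arg)
	{
		int *n_lists = arg;

		RTE_SET_USED(msl);
		(*n_lists)++;
		return 0; /* 0 continues; >0 stops the walk; <0 reports an error */
	}

	/* hypothetical helper, intended to be called from within a
	 * memory-related callback, where the hotplug lock is already held,
	 * so the thread-unsafe walk variant must be used
	 */
	static int
	count_memseg_lists(void)
	{
		int n_lists = 0;

		if (rte_memseg_list_walk_thread_unsafe(count_msl_cb, &n_lists) < 0) {
			RTE_LOG(ERR, EAL, "memseg list walk failed\n");
			return -1;
		}
		return n_lists;
	}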