mem: add API to lock/unlock memory hotplug
authorAnatoly Burakov <anatoly.burakov@intel.com>
Fri, 5 Jul 2019 13:10:27 +0000 (14:10 +0100)
committerThomas Monjalon <thomas@monjalon.net>
Fri, 5 Jul 2019 20:12:40 +0000 (22:12 +0200)
Currently, memory hotplug is locked automatically by all
memory-related _walk() functions, but sometimes locking the
memory subsystem outside of them is needed. There is no
public API to do that, which creates a dependency on the
shared memory config being public. Fix this by introducing a
new API to lock/unlock the memory hotplug subsystem.

Create a new common file for all things mem config, and a
new API namespace rte_mcfg_*, and search-and-replace all
usages of the locks with the new API.

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: David Marchand <david.marchand@redhat.com>
14 files changed:
drivers/bus/fslmc/fslmc_vfio.c
drivers/net/mlx4/mlx4_mr.c
drivers/net/mlx5/mlx5_mr.c
drivers/net/virtio/virtio_user/virtio_user_dev.c
lib/librte_eal/common/eal_common_mcfg.c [new file with mode: 0644]
lib/librte_eal/common/eal_common_memory.c
lib/librte_eal/common/include/rte_eal_memconfig.h
lib/librte_eal/common/malloc_heap.c
lib/librte_eal/common/meson.build
lib/librte_eal/common/rte_malloc.c
lib/librte_eal/freebsd/eal/Makefile
lib/librte_eal/linux/eal/Makefile
lib/librte_eal/linux/eal/eal_vfio.c
lib/librte_eal/rte_eal_version.map

index 1aae56f..44e4fa6 100644 (file)
@@ -347,14 +347,12 @@ fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
 int rte_fslmc_vfio_dmamap(void)
 {
        int i = 0, ret;
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
-       rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
 
        /* Lock before parsing and registering callback to memory subsystem */
-       rte_rwlock_read_lock(mem_lock);
+       rte_mcfg_mem_read_lock();
 
        if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) {
-               rte_rwlock_read_unlock(mem_lock);
+               rte_mcfg_mem_read_unlock();
                return -1;
        }
 
@@ -378,7 +376,7 @@ int rte_fslmc_vfio_dmamap(void)
        /* Existing segments have been mapped and memory callback for hotplug
         * has been installed.
         */
-       rte_rwlock_read_unlock(mem_lock);
+       rte_mcfg_mem_read_unlock();
 
        return 0;
 }
index 48d458a..80827ce 100644 (file)
@@ -593,7 +593,6 @@ mlx4_mr_create_primary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
                       uintptr_t addr)
 {
        struct mlx4_priv *priv = dev->data->dev_private;
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        const struct rte_memseg_list *msl;
        const struct rte_memseg *ms;
        struct mlx4_mr *mr = NULL;
@@ -696,7 +695,7 @@ alloc_resources:
         * just single page. If not, go on with the big chunk atomically from
         * here.
         */
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_lock();
        data_re = data;
        if (len > msl->page_sz &&
            !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
@@ -714,7 +713,7 @@ alloc_resources:
                 */
                data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
                data.end = data.start + msl->page_sz;
-               rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+               rte_mcfg_mem_read_unlock();
                mr_free(mr);
                goto alloc_resources;
        }
@@ -734,7 +733,7 @@ alloc_resources:
                DEBUG("port %u found MR for %p on final lookup, abort",
                      dev->data->port_id, (void *)addr);
                rte_rwlock_write_unlock(&priv->mr.rwlock);
-               rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+               rte_mcfg_mem_read_unlock();
                /*
                 * Must be unlocked before calling rte_free() because
                 * mlx4_mr_mem_event_free_cb() can be called inside.
@@ -802,12 +801,12 @@ alloc_resources:
        /* Lookup can't fail. */
        assert(entry->lkey != UINT32_MAX);
        rte_rwlock_write_unlock(&priv->mr.rwlock);
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
        return entry->lkey;
 err_mrlock:
        rte_rwlock_write_unlock(&priv->mr.rwlock);
 err_memlock:
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
 err_nolock:
        /*
         * In case of error, as this can be called in a datapath, a warning
index 66e8e87..872d059 100644 (file)
@@ -580,7 +580,6 @@ mlx5_mr_create_primary(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_dev_config *config = &priv->config;
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        const struct rte_memseg_list *msl;
        const struct rte_memseg *ms;
        struct mlx5_mr *mr = NULL;
@@ -684,7 +683,7 @@ alloc_resources:
         * just single page. If not, go on with the big chunk atomically from
         * here.
         */
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_lock();
        data_re = data;
        if (len > msl->page_sz &&
            !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
@@ -702,7 +701,7 @@ alloc_resources:
                 */
                data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
                data.end = data.start + msl->page_sz;
-               rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+               rte_mcfg_mem_read_unlock();
                mr_free(mr);
                goto alloc_resources;
        }
@@ -722,7 +721,7 @@ alloc_resources:
                DEBUG("port %u found MR for %p on final lookup, abort",
                      dev->data->port_id, (void *)addr);
                rte_rwlock_write_unlock(&sh->mr.rwlock);
-               rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+               rte_mcfg_mem_read_unlock();
                /*
                 * Must be unlocked before calling rte_free() because
                 * mlx5_mr_mem_event_free_cb() can be called inside.
@@ -790,12 +789,12 @@ alloc_resources:
        /* Lookup can't fail. */
        assert(entry->lkey != UINT32_MAX);
        rte_rwlock_write_unlock(&sh->mr.rwlock);
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
        return entry->lkey;
 err_mrlock:
        rte_rwlock_write_unlock(&sh->mr.rwlock);
 err_memlock:
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
 err_nolock:
        /*
         * In case of error, as this can be called in a datapath, a warning
index e743695..c3ab9a2 100644 (file)
@@ -125,7 +125,6 @@ is_vhost_user_by_type(const char *path)
 int
 virtio_user_start_device(struct virtio_user_dev *dev)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        uint64_t features;
        int ret;
 
@@ -142,7 +141,7 @@ virtio_user_start_device(struct virtio_user_dev *dev)
         * replaced when we get proper supports from the
         * memory subsystem in the future.
         */
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_lock();
        pthread_mutex_lock(&dev->mutex);
 
        if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
@@ -180,12 +179,12 @@ virtio_user_start_device(struct virtio_user_dev *dev)
 
        dev->started = true;
        pthread_mutex_unlock(&dev->mutex);
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
 
        return 0;
 error:
        pthread_mutex_unlock(&dev->mutex);
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
        /* TODO: free resource here or caller to check */
        return -1;
 }
diff --git a/lib/librte_eal/common/eal_common_mcfg.c b/lib/librte_eal/common/eal_common_mcfg.c
new file mode 100644 (file)
index 0000000..985d36c
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <rte_config.h>
+#include <rte_eal_memconfig.h>
+
+void
+rte_mcfg_mem_read_lock(void)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+}
+
+void
+rte_mcfg_mem_read_unlock(void)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+}
+
+void
+rte_mcfg_mem_write_lock(void)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+}
+
+void
+rte_mcfg_mem_write_unlock(void)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+}
index 858d563..fe22b13 100644 (file)
@@ -596,13 +596,12 @@ rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
 int
 rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        int ret = 0;
 
        /* do not allow allocations/frees/init while we iterate */
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_lock();
        ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
 
        return ret;
 }
@@ -638,13 +637,12 @@ rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
 int
 rte_memseg_walk(rte_memseg_walk_t func, void *arg)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        int ret = 0;
 
        /* do not allow allocations/frees/init while we iterate */
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_lock();
        ret = rte_memseg_walk_thread_unsafe(func, arg);
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
 
        return ret;
 }
@@ -671,13 +669,12 @@ rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
 int
 rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        int ret = 0;
 
        /* do not allow allocations/frees/init while we iterate */
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_lock();
        ret = rte_memseg_list_walk_thread_unsafe(func, arg);
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
 
        return ret;
 }
@@ -727,12 +724,11 @@ rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
 int
 rte_memseg_get_fd(const struct rte_memseg *ms)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        int ret;
 
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_lock();
        ret = rte_memseg_get_fd_thread_unsafe(ms);
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
 
        return ret;
 }
@@ -783,12 +779,11 @@ rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
 int
 rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        int ret;
 
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_lock();
        ret = rte_memseg_get_fd_offset_thread_unsafe(ms, offset);
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
 
        return ret;
 }
@@ -809,7 +804,7 @@ rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
                rte_errno = EINVAL;
                return -1;
        }
-       rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_lock();
 
        /* make sure the segment doesn't already exist */
        if (malloc_heap_find_external_seg(va_addr, len) != NULL) {
@@ -838,14 +833,13 @@ rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
        /* memseg list successfully created - increment next socket ID */
        mcfg->next_socket_id++;
 unlock:
-       rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_unlock();
        return ret;
 }
 
 int
 rte_extmem_unregister(void *va_addr, size_t len)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        struct rte_memseg_list *msl;
        int ret = 0;
 
@@ -853,7 +847,7 @@ rte_extmem_unregister(void *va_addr, size_t len)
                rte_errno = EINVAL;
                return -1;
        }
-       rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_lock();
 
        /* find our segment */
        msl = malloc_heap_find_external_seg(va_addr, len);
@@ -865,14 +859,13 @@ rte_extmem_unregister(void *va_addr, size_t len)
 
        ret = malloc_heap_destroy_external_seg(msl);
 unlock:
-       rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_unlock();
        return ret;
 }
 
 static int
 sync_memory(void *va_addr, size_t len, bool attach)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        struct rte_memseg_list *msl;
        int ret = 0;
 
@@ -880,7 +873,7 @@ sync_memory(void *va_addr, size_t len, bool attach)
                rte_errno = EINVAL;
                return -1;
        }
-       rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_lock();
 
        /* find our segment */
        msl = malloc_heap_find_external_seg(va_addr, len);
@@ -895,7 +888,7 @@ sync_memory(void *va_addr, size_t len, bool attach)
                ret = rte_fbarray_detach(&msl->memseg_arr);
 
 unlock:
-       rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_unlock();
        return ret;
 }
 
@@ -923,7 +916,7 @@ rte_eal_memory_init(void)
                return -1;
 
        /* lock mem hotplug here, to prevent races while we init */
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_lock();
 
        if (rte_eal_memseg_init() < 0)
                goto fail;
@@ -942,6 +935,6 @@ rte_eal_memory_init(void)
 
        return 0;
 fail:
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
        return -1;
 }
index 84aabe3..a554518 100644 (file)
@@ -100,6 +100,30 @@ rte_eal_mcfg_wait_complete(struct rte_mem_config* mcfg)
                rte_pause();
 }
 
+/**
+ * Lock the internal EAL shared memory configuration for shared access.
+ */
+void
+rte_mcfg_mem_read_lock(void);
+
+/**
+ * Unlock the internal EAL shared memory configuration for shared access.
+ */
+void
+rte_mcfg_mem_read_unlock(void);
+
+/**
+ * Lock the internal EAL shared memory configuration for exclusive access.
+ */
+void
+rte_mcfg_mem_write_lock(void);
+
+/**
+ * Unlock the internal EAL shared memory configuration for exclusive access.
+ */
+void
+rte_mcfg_mem_write_unlock(void);
+
 #ifdef __cplusplus
 }
 #endif
index f923593..f1d31de 100644 (file)
@@ -485,10 +485,9 @@ try_expand_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
                int socket, unsigned int flags, size_t align, size_t bound,
                bool contig)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        int ret;
 
-       rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_lock();
 
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                ret = try_expand_heap_primary(heap, pg_sz, elt_size, socket,
@@ -498,7 +497,7 @@ try_expand_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
                                flags, align, bound, contig);
        }
 
-       rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_unlock();
        return ret;
 }
 
@@ -821,7 +820,6 @@ malloc_heap_free_pages(void *aligned_start, size_t aligned_len)
 int
 malloc_heap_free(struct malloc_elem *elem)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        struct malloc_heap *heap;
        void *start, *aligned_start, *end, *aligned_end;
        size_t len, aligned_len, page_sz;
@@ -935,7 +933,7 @@ malloc_heap_free(struct malloc_elem *elem)
 
        /* now we can finally free us some pages */
 
-       rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_lock();
 
        /*
         * we allow secondary processes to clear the heap of this allocated
@@ -990,7 +988,7 @@ malloc_heap_free(struct malloc_elem *elem)
        RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
                msl->socket_id, aligned_len >> 20ULL);
 
-       rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_unlock();
 free_unlock:
        rte_spinlock_unlock(&(heap->lock));
        return ret;
@@ -1344,7 +1342,7 @@ rte_eal_malloc_heap_init(void)
 
        if (register_mp_requests()) {
                RTE_LOG(ERR, EAL, "Couldn't register malloc multiprocess actions\n");
-               rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+               rte_mcfg_mem_read_unlock();
                return -1;
        }
 
@@ -1352,7 +1350,7 @@ rte_eal_malloc_heap_init(void)
         * even come before primary itself is fully initialized, and secondaries
         * do not need to initialize the heap.
         */
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
 
        /* secondary process does not need to initialize anything */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
index bafd232..58b433b 100644 (file)
@@ -18,6 +18,7 @@ common_sources = files(
        'eal_common_launch.c',
        'eal_common_lcore.c',
        'eal_common_log.c',
+       'eal_common_mcfg.c',
        'eal_common_memalloc.c',
        'eal_common_memory.c',
        'eal_common_memzone.c',
index b119eba..2cad7be 100644 (file)
@@ -223,7 +223,7 @@ rte_malloc_heap_get_socket(const char *name)
                rte_errno = EINVAL;
                return -1;
        }
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_lock();
        for (idx = 0; idx < RTE_MAX_HEAPS; idx++) {
                struct malloc_heap *tmp = &mcfg->malloc_heaps[idx];
 
@@ -239,7 +239,7 @@ rte_malloc_heap_get_socket(const char *name)
                rte_errno = ENOENT;
                ret = -1;
        }
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
 
        return ret;
 }
@@ -254,7 +254,7 @@ rte_malloc_heap_socket_is_external(int socket_id)
        if (socket_id == SOCKET_ID_ANY)
                return 0;
 
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_lock();
        for (idx = 0; idx < RTE_MAX_HEAPS; idx++) {
                struct malloc_heap *tmp = &mcfg->malloc_heaps[idx];
 
@@ -264,7 +264,7 @@ rte_malloc_heap_socket_is_external(int socket_id)
                        break;
                }
        }
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
 
        return ret;
 }
@@ -352,7 +352,6 @@ int
 rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,
                rte_iova_t iova_addrs[], unsigned int n_pages, size_t page_sz)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        struct malloc_heap *heap = NULL;
        struct rte_memseg_list *msl;
        unsigned int n;
@@ -369,7 +368,7 @@ rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,
                rte_errno = EINVAL;
                return -1;
        }
-       rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_lock();
 
        /* find our heap */
        heap = find_named_heap(heap_name);
@@ -398,7 +397,7 @@ rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,
        rte_spinlock_unlock(&heap->lock);
 
 unlock:
-       rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_unlock();
 
        return ret;
 }
@@ -406,7 +405,6 @@ unlock:
 int
 rte_malloc_heap_memory_remove(const char *heap_name, void *va_addr, size_t len)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        struct malloc_heap *heap = NULL;
        struct rte_memseg_list *msl;
        int ret;
@@ -418,7 +416,7 @@ rte_malloc_heap_memory_remove(const char *heap_name, void *va_addr, size_t len)
                rte_errno = EINVAL;
                return -1;
        }
-       rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_lock();
        /* find our heap */
        heap = find_named_heap(heap_name);
        if (heap == NULL) {
@@ -448,7 +446,7 @@ rte_malloc_heap_memory_remove(const char *heap_name, void *va_addr, size_t len)
        ret = malloc_heap_destroy_external_seg(msl);
 
 unlock:
-       rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_unlock();
 
        return ret;
 }
@@ -456,7 +454,6 @@ unlock:
 static int
 sync_memory(const char *heap_name, void *va_addr, size_t len, bool attach)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        struct malloc_heap *heap = NULL;
        struct rte_memseg_list *msl;
        int ret;
@@ -468,7 +465,7 @@ sync_memory(const char *heap_name, void *va_addr, size_t len, bool attach)
                rte_errno = EINVAL;
                return -1;
        }
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_lock();
 
        /* find our heap */
        heap = find_named_heap(heap_name);
@@ -516,7 +513,7 @@ sync_memory(const char *heap_name, void *va_addr, size_t len, bool attach)
                }
        }
 unlock:
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
        return ret;
 }
 
@@ -549,7 +546,7 @@ rte_malloc_heap_create(const char *heap_name)
        /* check if there is space in the heap list, or if heap with this name
         * already exists.
         */
-       rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_lock();
 
        for (i = 0; i < RTE_MAX_HEAPS; i++) {
                struct malloc_heap *tmp = &mcfg->malloc_heaps[i];
@@ -578,7 +575,7 @@ rte_malloc_heap_create(const char *heap_name)
        /* we're sure that we can create a new heap, so do it */
        ret = malloc_heap_create(heap, heap_name);
 unlock:
-       rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_unlock();
 
        return ret;
 }
@@ -586,7 +583,6 @@ unlock:
 int
 rte_malloc_heap_destroy(const char *heap_name)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        struct malloc_heap *heap = NULL;
        int ret;
 
@@ -597,7 +593,7 @@ rte_malloc_heap_destroy(const char *heap_name)
                rte_errno = EINVAL;
                return -1;
        }
-       rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_lock();
 
        /* start from non-socket heaps */
        heap = find_named_heap(heap_name);
@@ -621,7 +617,7 @@ rte_malloc_heap_destroy(const char *heap_name)
        if (ret < 0)
                rte_spinlock_unlock(&heap->lock);
 unlock:
-       rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_write_unlock();
 
        return ret;
 }
index ca616c4..eb92127 100644 (file)
@@ -44,6 +44,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_timer.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_memzone.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_log.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_launch.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_mcfg.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_memalloc.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_memory.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_tailqs.c
index 729795a..dfe8e9a 100644 (file)
@@ -52,6 +52,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_timer.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_memzone.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_log.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_launch.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_mcfg.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_memalloc.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_memory.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_tailqs.c
index feada64..96a03a6 100644 (file)
@@ -635,8 +635,6 @@ int
 rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
                int *vfio_dev_fd, struct vfio_device_info *device_info)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
-       rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
        struct vfio_group_status group_status = {
                        .argsz = sizeof(group_status)
        };
@@ -739,7 +737,7 @@ rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
                        /* lock memory hotplug before mapping and release it
                         * after registering callback, to prevent races
                         */
-                       rte_rwlock_read_lock(mem_lock);
+                       rte_mcfg_mem_read_lock();
                        if (vfio_cfg == default_vfio_cfg)
                                ret = t->dma_map_func(vfio_container_fd);
                        else
@@ -750,7 +748,7 @@ rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
                                        dev_addr, errno, strerror(errno));
                                close(vfio_group_fd);
                                rte_vfio_clear_group(vfio_group_fd);
-                               rte_rwlock_read_unlock(mem_lock);
+                               rte_mcfg_mem_read_unlock();
                                return -1;
                        }
 
@@ -781,7 +779,7 @@ rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
                                                        map->len);
                                        rte_spinlock_recursive_unlock(
                                                        &user_mem_maps->lock);
-                                       rte_rwlock_read_unlock(mem_lock);
+                                       rte_mcfg_mem_read_unlock();
                                        return -1;
                                }
                        }
@@ -795,7 +793,7 @@ rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
                        else
                                ret = 0;
                        /* unlock memory hotplug */
-                       rte_rwlock_read_unlock(mem_lock);
+                       rte_mcfg_mem_read_unlock();
 
                        if (ret && rte_errno != ENOTSUP) {
                                RTE_LOG(ERR, EAL, "Could not install memory event callback for VFIO\n");
@@ -862,8 +860,6 @@ int
 rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
                    int vfio_dev_fd)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
-       rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
        struct vfio_group_status group_status = {
                        .argsz = sizeof(group_status)
        };
@@ -876,7 +872,7 @@ rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
         * VFIO device, because this might be the last device and we might need
         * to unregister the callback.
         */
-       rte_rwlock_read_lock(mem_lock);
+       rte_mcfg_mem_read_lock();
 
        /* get group number */
        ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
@@ -947,7 +943,7 @@ rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
        ret = 0;
 
 out:
-       rte_rwlock_read_unlock(mem_lock);
+       rte_mcfg_mem_read_unlock();
        return ret;
 }
 
index a53a29a..754060d 100644 (file)
@@ -292,6 +292,10 @@ DPDK_19.08 {
 
        rte_lcore_index;
        rte_lcore_to_socket_id;
+       rte_mcfg_mem_read_lock;
+       rte_mcfg_mem_read_unlock;
+       rte_mcfg_mem_write_lock;
+       rte_mcfg_mem_write_unlock;
        rte_rand;
        rte_srand;