int rte_fslmc_vfio_dmamap(void)
{
int i = 0, ret;
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
/* Lock before parsing and registering callback to memory subsystem */
- rte_rwlock_read_lock(mem_lock);
+ rte_mcfg_mem_read_lock();
if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) {
- rte_rwlock_read_unlock(mem_lock);
+ rte_mcfg_mem_read_unlock();
return -1;
}
/* Existing segments have been mapped and memory callback for hotplug
* has been installed.
*/
- rte_rwlock_read_unlock(mem_lock);
+ rte_mcfg_mem_read_unlock();
return 0;
}
uintptr_t addr)
{
struct mlx4_priv *priv = dev->data->dev_private;
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
const struct rte_memseg_list *msl;
const struct rte_memseg *ms;
struct mlx4_mr *mr = NULL;
* just single page. If not, go on with the big chunk atomically from
* here.
*/
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_lock();
data_re = data;
if (len > msl->page_sz &&
!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
*/
data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
data.end = data.start + msl->page_sz;
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
mr_free(mr);
goto alloc_resources;
}
DEBUG("port %u found MR for %p on final lookup, abort",
dev->data->port_id, (void *)addr);
rte_rwlock_write_unlock(&priv->mr.rwlock);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
/*
* Must be unlocked before calling rte_free() because
* mlx4_mr_mem_event_free_cb() can be called inside.
/* Lookup can't fail. */
assert(entry->lkey != UINT32_MAX);
rte_rwlock_write_unlock(&priv->mr.rwlock);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return entry->lkey;
err_mrlock:
rte_rwlock_write_unlock(&priv->mr.rwlock);
err_memlock:
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
err_nolock:
/*
* In case of error, as this can be called in a datapath, a warning
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_dev_config *config = &priv->config;
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
const struct rte_memseg_list *msl;
const struct rte_memseg *ms;
struct mlx5_mr *mr = NULL;
* just single page. If not, go on with the big chunk atomically from
* here.
*/
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_lock();
data_re = data;
if (len > msl->page_sz &&
!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
*/
data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
data.end = data.start + msl->page_sz;
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
mr_free(mr);
goto alloc_resources;
}
DEBUG("port %u found MR for %p on final lookup, abort",
dev->data->port_id, (void *)addr);
rte_rwlock_write_unlock(&sh->mr.rwlock);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
/*
* Must be unlocked before calling rte_free() because
* mlx5_mr_mem_event_free_cb() can be called inside.
/* Lookup can't fail. */
assert(entry->lkey != UINT32_MAX);
rte_rwlock_write_unlock(&sh->mr.rwlock);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return entry->lkey;
err_mrlock:
rte_rwlock_write_unlock(&sh->mr.rwlock);
err_memlock:
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
err_nolock:
/*
* In case of error, as this can be called in a datapath, a warning
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
uint64_t features;
int ret;
 * replaced when we get proper support from the
* memory subsystem in the future.
*/
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_lock();
pthread_mutex_lock(&dev->mutex);
if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
dev->started = true;
pthread_mutex_unlock(&dev->mutex);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return 0;
error:
pthread_mutex_unlock(&dev->mutex);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
/* TODO: free resource here or caller to check */
return -1;
}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <rte_config.h>
+#include <rte_eal_memconfig.h>
+
+void
+rte_mcfg_mem_read_lock(void)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+}
+
+void
+rte_mcfg_mem_read_unlock(void)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+}
+
+void
+rte_mcfg_mem_write_lock(void)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+}
+
+void
+rte_mcfg_mem_write_unlock(void)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+}
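
The four wrappers above replace the open-coded rte_eal_get_configuration()->mem_config dereference that every caller below used to repeat. A minimal usage sketch follows, assuming a driver that wants to account for all existing memsegs before enabling its hotplug callback; count_seg_cb and example_map_existing_segments are illustrative names only, while the lock wrappers and rte_memseg_walk_thread_unsafe() are existing EAL API:

#include <rte_eal_memconfig.h>
#include <rte_memory.h>

static int
count_seg_cb(const struct rte_memseg *ms, void *arg)
{
	int *count = arg;

	(void)ms; /* a real driver would DMA-map ms->addr / ms->len here */
	(*count)++;
	return 0;
}

static int
example_map_existing_segments(void)
{
	int count = 0;

	rte_mcfg_mem_read_lock();
	/* use the _thread_unsafe walk: the shared lock is already held here */
	if (rte_memseg_walk_thread_unsafe(count_seg_cb, &count) < 0) {
		rte_mcfg_mem_read_unlock();
		return -1;
	}
	rte_mcfg_mem_read_unlock();
	return count;
}

Holding the read lock across the walk keeps hotplug from adding or removing segments mid-iteration, which is the same reason the fslmc and VFIO hunks above take it around their DMA-mapping loops.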
int
rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
int ret = 0;
/* do not allow allocations/frees/init while we iterate */
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_lock();
ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return ret;
}
int
rte_memseg_walk(rte_memseg_walk_t func, void *arg)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
int ret = 0;
/* do not allow allocations/frees/init while we iterate */
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_lock();
ret = rte_memseg_walk_thread_unsafe(func, arg);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return ret;
}
int
rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
int ret = 0;
/* do not allow allocations/frees/init while we iterate */
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_lock();
ret = rte_memseg_list_walk_thread_unsafe(func, arg);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return ret;
}
int
rte_memseg_get_fd(const struct rte_memseg *ms)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
int ret;
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_lock();
ret = rte_memseg_get_fd_thread_unsafe(ms);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return ret;
}
int
rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
int ret;
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_lock();
ret = rte_memseg_get_fd_offset_thread_unsafe(ms, offset);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return ret;
}
rte_errno = EINVAL;
return -1;
}
- rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_lock();
/* make sure the segment doesn't already exist */
if (malloc_heap_find_external_seg(va_addr, len) != NULL) {
/* memseg list successfully created - increment next socket ID */
mcfg->next_socket_id++;
unlock:
- rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_unlock();
return ret;
}
int
rte_extmem_unregister(void *va_addr, size_t len)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
struct rte_memseg_list *msl;
int ret = 0;
rte_errno = EINVAL;
return -1;
}
- rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_lock();
/* find our segment */
msl = malloc_heap_find_external_seg(va_addr, len);
ret = malloc_heap_destroy_external_seg(msl);
unlock:
- rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_unlock();
return ret;
}
static int
sync_memory(void *va_addr, size_t len, bool attach)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
struct rte_memseg_list *msl;
int ret = 0;
rte_errno = EINVAL;
return -1;
}
- rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_lock();
/* find our segment */
msl = malloc_heap_find_external_seg(va_addr, len);
ret = rte_fbarray_detach(&msl->memseg_arr);
unlock:
- rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_unlock();
return ret;
}
return -1;
/* lock mem hotplug here, to prevent races while we init */
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_lock();
if (rte_eal_memseg_init() < 0)
goto fail;
return 0;
fail:
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return -1;
}
rte_pause();
}
+/**
+ * Lock the internal EAL shared memory configuration for shared access.
+ */
+void
+rte_mcfg_mem_read_lock(void);
+
+/**
+ * Unlock the internal EAL shared memory configuration for shared access.
+ */
+void
+rte_mcfg_mem_read_unlock(void);
+
+/**
+ * Lock the internal EAL shared memory configuration for exclusive access.
+ */
+void
+rte_mcfg_mem_write_lock(void);
+
+/**
+ * Unlock the internal EAL shared memory configuration for exclusive access.
+ */
+void
+rte_mcfg_mem_write_unlock(void);
+
#ifdef __cplusplus
}
#endif
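
The declarations above are the whole public surface of the change; the write-side pair is what rte_extmem_register() and rte_malloc_heap_memory_add() use further down when they modify the segment lists. A minimal sketch of that pattern, where do_layout_change() is a purely hypothetical stand-in for the EAL-internal bookkeeping and not a real API:

#include <rte_eal_memconfig.h>

/* hypothetical stand-in for EAL-internal segment-list updates */
static int
do_layout_change(void)
{
	return 0;
}

static int
example_modify_memory_layout(void)
{
	int ret;

	rte_mcfg_mem_write_lock();
	/* no reader (memseg walk, allocation path) can run while this is held */
	ret = do_layout_change();
	rte_mcfg_mem_write_unlock();

	return ret;
}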
int socket, unsigned int flags, size_t align, size_t bound,
bool contig)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
int ret;
- rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_lock();
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
ret = try_expand_heap_primary(heap, pg_sz, elt_size, socket,
flags, align, bound, contig);
}
- rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_unlock();
return ret;
}
int
malloc_heap_free(struct malloc_elem *elem)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
struct malloc_heap *heap;
void *start, *aligned_start, *end, *aligned_end;
size_t len, aligned_len, page_sz;
/* now we can finally free us some pages */
- rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_lock();
/*
* we allow secondary processes to clear the heap of this allocated
RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
msl->socket_id, aligned_len >> 20ULL);
- rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_unlock();
free_unlock:
rte_spinlock_unlock(&(heap->lock));
return ret;
if (register_mp_requests()) {
RTE_LOG(ERR, EAL, "Couldn't register malloc multiprocess actions\n");
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return -1;
}
* even come before primary itself is fully initialized, and secondaries
* do not need to initialize the heap.
*/
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
/* secondary process does not need to initialize anything */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
'eal_common_launch.c',
'eal_common_lcore.c',
'eal_common_log.c',
+ 'eal_common_mcfg.c',
'eal_common_memalloc.c',
'eal_common_memory.c',
'eal_common_memzone.c',
rte_errno = EINVAL;
return -1;
}
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_lock();
for (idx = 0; idx < RTE_MAX_HEAPS; idx++) {
struct malloc_heap *tmp = &mcfg->malloc_heaps[idx];
rte_errno = ENOENT;
ret = -1;
}
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return ret;
}
if (socket_id == SOCKET_ID_ANY)
return 0;
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_lock();
for (idx = 0; idx < RTE_MAX_HEAPS; idx++) {
struct malloc_heap *tmp = &mcfg->malloc_heaps[idx];
break;
}
}
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return ret;
}
rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,
rte_iova_t iova_addrs[], unsigned int n_pages, size_t page_sz)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
struct malloc_heap *heap = NULL;
struct rte_memseg_list *msl;
unsigned int n;
rte_errno = EINVAL;
return -1;
}
- rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_lock();
/* find our heap */
heap = find_named_heap(heap_name);
rte_spinlock_unlock(&heap->lock);
unlock:
- rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_unlock();
return ret;
}
int
rte_malloc_heap_memory_remove(const char *heap_name, void *va_addr, size_t len)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
struct malloc_heap *heap = NULL;
struct rte_memseg_list *msl;
int ret;
rte_errno = EINVAL;
return -1;
}
- rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_lock();
/* find our heap */
heap = find_named_heap(heap_name);
if (heap == NULL) {
ret = malloc_heap_destroy_external_seg(msl);
unlock:
- rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_unlock();
return ret;
}
static int
sync_memory(const char *heap_name, void *va_addr, size_t len, bool attach)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
struct malloc_heap *heap = NULL;
struct rte_memseg_list *msl;
int ret;
rte_errno = EINVAL;
return -1;
}
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_lock();
/* find our heap */
heap = find_named_heap(heap_name);
}
}
unlock:
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return ret;
}
/* check if there is space in the heap list, or if heap with this name
* already exists.
*/
- rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_lock();
for (i = 0; i < RTE_MAX_HEAPS; i++) {
struct malloc_heap *tmp = &mcfg->malloc_heaps[i];
/* we're sure that we can create a new heap, so do it */
ret = malloc_heap_create(heap, heap_name);
unlock:
- rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_unlock();
return ret;
}
int
rte_malloc_heap_destroy(const char *heap_name)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
struct malloc_heap *heap = NULL;
int ret;
rte_errno = EINVAL;
return -1;
}
- rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_lock();
/* start from non-socket heaps */
heap = find_named_heap(heap_name);
if (ret < 0)
rte_spinlock_unlock(&heap->lock);
unlock:
- rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_unlock();
return ret;
}
SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_memzone.c
SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_log.c
SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_launch.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_mcfg.c
SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_memalloc.c
SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_memory.c
SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_tailqs.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_memzone.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_log.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_launch.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_mcfg.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_memalloc.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_memory.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_tailqs.c
rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
int *vfio_dev_fd, struct vfio_device_info *device_info)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
struct vfio_group_status group_status = {
.argsz = sizeof(group_status)
};
/* lock memory hotplug before mapping and release it
* after registering callback, to prevent races
*/
- rte_rwlock_read_lock(mem_lock);
+ rte_mcfg_mem_read_lock();
if (vfio_cfg == default_vfio_cfg)
ret = t->dma_map_func(vfio_container_fd);
else
dev_addr, errno, strerror(errno));
close(vfio_group_fd);
rte_vfio_clear_group(vfio_group_fd);
- rte_rwlock_read_unlock(mem_lock);
+ rte_mcfg_mem_read_unlock();
return -1;
}
map->len);
rte_spinlock_recursive_unlock(
&user_mem_maps->lock);
- rte_rwlock_read_unlock(mem_lock);
+ rte_mcfg_mem_read_unlock();
return -1;
}
}
else
ret = 0;
/* unlock memory hotplug */
- rte_rwlock_read_unlock(mem_lock);
+ rte_mcfg_mem_read_unlock();
if (ret && rte_errno != ENOTSUP) {
RTE_LOG(ERR, EAL, "Could not install memory event callback for VFIO\n");
rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
int vfio_dev_fd)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
struct vfio_group_status group_status = {
.argsz = sizeof(group_status)
};
* VFIO device, because this might be the last device and we might need
* to unregister the callback.
*/
- rte_rwlock_read_lock(mem_lock);
+ rte_mcfg_mem_read_lock();
/* get group number */
ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
ret = 0;
out:
- rte_rwlock_read_unlock(mem_lock);
+ rte_mcfg_mem_read_unlock();
return ret;
}
rte_lcore_index;
rte_lcore_to_socket_id;
+ rte_mcfg_mem_read_lock;
+ rte_mcfg_mem_read_unlock;
+ rte_mcfg_mem_write_lock;
+ rte_mcfg_mem_write_unlock;
rte_rand;
rte_srand;