#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_memcpy.h>
+#include <rte_memzone.h>
#include <rte_atomic.h>
#include <rte_fbarray.h>
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
+#include "eal_memcfg.h"
+#include "eal_private.h"
#include "malloc_elem.h"
#include "malloc_heap.h"
#include "malloc_mp.h"
/*
 * heap_alloc() interior (diff fragment: the enclosing signature and the
 * remainder of the function are elided from this view).
 *
 * Round the requested size and alignment up to cache-line granularity
 * before searching the heap's free lists.
 */
size = RTE_CACHE_LINE_ROUNDUP(size);
align = RTE_CACHE_LINE_ROUNDUP(align);
+ /* roundup might cause an overflow */
+ if (size == 0)
+ return NULL;
/*
 * Find a free element that can satisfy size/align/bound/contig, then carve
 * the allocation out of it. NOTE(review): a rounded-up size of 0 can only
 * result from unsigned wraparound of a huge request, hence the guard above.
 */
elem = find_suitable_element(heap, size, flags, align, bound, contig);
if (elem != NULL) {
elem = malloc_elem_alloc(elem, size, align, bound, contig);
/*
 * try_expand_heap_secondary() fragment (diff fragment: the first line of the
 * signature and parts of the request setup are elided from this view).
 *
 * Marshals a heap-expansion request and forwards it to the primary process.
 */
size_t elt_size, int socket, unsigned int flags, size_t align,
size_t bound, bool contig)
{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
struct malloc_mp_req req;
int req_result;
req.alloc_req.elt_size = elt_size;
req.alloc_req.page_sz = pg_sz;
req.alloc_req.socket = socket;
- req.alloc_req.heap = heap; /* it's in shared memory */
/*
 * Send the heap's index into mcfg->malloc_heaps rather than a raw pointer;
 * NOTE(review): presumably because a pointer into shared memory is not
 * guaranteed identical across process address spaces — confirm against
 * malloc_mp request handling on the primary side.
 */
+ req.alloc_req.malloc_heap_idx = heap - mcfg->malloc_heaps;
req_result = request_to_primary(&req);
/*
 * try_expand_heap() fragment (diff fragment: the first line of the signature
 * is elided from this view).
 *
 * Grows the heap under the memory hotplug write lock, delegating to the
 * primary-process allocation path.
 */
int socket, unsigned int flags, size_t align, size_t bound,
bool contig)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
int ret;
/*
 * Lock API migration: take the hotplug lock through the rte_mcfg_mem_*
 * wrappers instead of touching mcfg->memory_hotplug_lock directly, so the
 * mcfg local is no longer needed here.
 */
- rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_lock();
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
ret = try_expand_heap_primary(heap, pg_sz, elt_size, socket,
flags, align, bound, contig);
}
/*
 * NOTE(review): as visible here, ret is never assigned for non-primary
 * processes; the secondary-process else branch appears to be elided from
 * this fragment — verify against the full source.
 */
- rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_unlock();
return ret;
}
/*
 * malloc_heap_alloc() fragment (diff fragment: the enclosing signature and
 * the non-legacy allocation path are elided from this view).
 */
unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
int socket_id;
void *ret;
/*
 * Config access migration: read internal configuration through the
 * eal_get_internal_configuration() accessor instead of the internal_config
 * global.
 */
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* serialize access to this heap; released on the elided alloc_unlock path */
rte_spinlock_lock(&(heap->lock));
/* treat align == 0 as "no alignment requirement" (byte alignment) */
align = align == 0 ? 1 : align;
/* for legacy mode, try once and with all flags */
- if (internal_config.legacy_mem) {
+ if (internal_conf->legacy_mem) {
ret = heap_alloc(heap, type, size, flags, align, bound, contig);
goto alloc_unlock;
}
/*
 * Free a malloc element back to its heap, and — when not in legacy mode and
 * not an external segment — attempt to return whole pages to the system.
 * Returns 0 on success, -1 on a bad/busy element.
 *
 * NOTE(review): diff fragment — several context spans are elided (e.g. the
 * assignments of heap/msl/start/len, the page-boundary computations, and the
 * function's closing brace), so comments below describe only the visible
 * lines.
 */
int
malloc_heap_free(struct malloc_elem *elem)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
struct malloc_heap *heap;
void *start, *aligned_start, *end, *aligned_end;
size_t len, aligned_len, page_sz;
struct rte_memseg_list *msl;
unsigned int i, n_segs, before_space, after_space;
int ret;
/* config access migration: use the accessor, not the internal_config global */
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* reject corrupted cookies or elements not currently allocated */
if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
return -1;
/* ...of which we can't avail if we are in legacy mode, or if this is an
* externally allocated segment.
*/
- if (internal_config.legacy_mem || (msl->external > 0))
+ if (internal_conf->legacy_mem || (msl->external > 0))
goto free_unlock;
/* check if we can free any memory back to the system */
* we will defer freeing these hugepages until the entire original allocation
* can be freed
*/
- if (internal_config.match_allocations && elem->size != elem->orig_size)
+ if (internal_conf->match_allocations && elem->size != elem->orig_size)
goto free_unlock;
/* probably, but let's make sure, as we may not be using up full page */
/* now we can finally free us some pages */
/*
 * Lock API migration: take/release the hotplug lock through the
 * rte_mcfg_mem_* wrappers instead of mcfg->memory_hotplug_lock directly.
 */
- rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_lock();
/*
* we allow secondary processes to clear the heap of this allocated
RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
msl->socket_id, aligned_len >> 20ULL);
- rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_write_unlock();
free_unlock:
/* per-heap spinlock, acquired in an elided context span above */
rte_spinlock_unlock(&(heap->lock));
return ret;
/*
 * rte_eal_malloc_heap_init() fragment (diff fragment: the signature, the
 * external-heap setup, and the function tail are elided from this view).
 */
{
/*
 * NOTE(review): mcfg is kept here even though its visible lock uses below
 * were migrated to rte_mcfg_mem_*; it is presumably still referenced in the
 * elided body (e.g. heap initialization) — confirm it does not become an
 * unused variable.
 */
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
unsigned int i;
/* config access migration: use the accessor, not the internal_config global */
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- if (internal_config.match_allocations) {
+ if (internal_conf->match_allocations)
RTE_LOG(DEBUG, EAL, "Hugepages will be freed exactly as allocated.\n");
- }
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
/* assign min socket ID to external heaps */
/* primary must register the multiprocess alloc/free request handlers */
if (register_mp_requests()) {
RTE_LOG(ERR, EAL, "Couldn't register malloc multiprocess actions\n");
/* lock API migration: release via wrapper on the error path too */
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return -1;
}
* even come before primary itself is fully initialized, and secondaries
* do not need to initialize the heap.
*/
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
/* secondary process does not need to initialize anything */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)