#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
-#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>
#include <rte_errno.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
-#include <rte_launch.h>
-#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
-#include <rte_memcpy.h>
#include <rte_memzone.h>
-#include <rte_atomic.h>
#include <rte_fbarray.h>
#include "eal_internal_cfg.h"
static struct malloc_elem *
malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,
- void *start, size_t len)
+ void *start, size_t len, bool dirty)
{
struct malloc_elem *elem = start;
- malloc_elem_init(elem, heap, msl, len, elem, len);
+ malloc_elem_init(elem, heap, msl, len, elem, len, dirty);
malloc_elem_insert(elem);
found_msl = &mcfg->memsegs[msl_idx];
- malloc_heap_add_memory(heap, found_msl, ms->addr, len);
+ malloc_heap_add_memory(heap, found_msl, ms->addr, len,
+ ms->flags & RTE_MEMSEG_FLAG_DIRTY);
heap->total_size += len;
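
A segment carries RTE_MEMSEG_FLAG_DIRTY when its hugepage backing may still
hold data from a previous run (for instance when hugepage files are kept
across application restarts), so the heap must track dirtiness per element
and zero such memory before handing it to the user. A minimal sketch of
inspecting the flag through the public walk API; count_dirty_memsegs() is a
hypothetical helper, while rte_memseg_walk() and the flag itself are real:

#include <rte_common.h>
#include <rte_memory.h>

/* Illustrative helper: count memsegs whose backing may hold stale data. */
static int
count_dirty_cb(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	unsigned int *n = arg;

	if (ms->flags & RTE_MEMSEG_FLAG_DIRTY)
		(*n)++;
	return 0; /* keep walking */
}

static unsigned int
count_dirty_memsegs(void)
{
	unsigned int n = 0;

	rte_memseg_walk(count_dirty_cb, &n);
	return n;
}
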
unsigned int flags, size_t align, size_t bound, bool contig)
{
struct malloc_elem *elem;
+ size_t user_size = size;
size = RTE_CACHE_LINE_ROUNDUP(size);
align = RTE_CACHE_LINE_ROUNDUP(align);
/* increase heap's count of allocated elements */
heap->alloc_count++;
+
+ asan_set_redzone(elem, user_size);
}
return elem == NULL ? NULL : (void *)(&elem[1]);
/* increase heap's count of allocated elements */
heap->alloc_count++;
+
+ asan_set_redzone(elem, size);
}
return elem == NULL ? NULL : (void *)(&elem[1]);
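
The two asan_set_redzone() calls differ on purpose: heap_alloc() still knows
the size the caller originally requested (user_size, saved before the
cache-line round-up), whereas heap_alloc_biggest() hands out the entire
element, so the full size is the user area. The redzone poisons the round-up
slack so that any access past the requested bytes is reported. DPDK writes
the ASan shadow bytes itself; the sketch below shows the equivalent effect
through the standard compiler-rt interface (redzone_demo() is a hypothetical
illustration, not DPDK code):

#include <stddef.h>
#include <sanitizer/asan_interface.h>

static void
redzone_demo(void *payload, size_t user_size, size_t rounded_size)
{
	/* the bytes the caller asked for stay addressable */
	__asan_unpoison_memory_region(payload, user_size);
	/* the cache-line round-up slack becomes a redzone: any read or
	 * write past user_size now triggers an ASan report */
	__asan_poison_memory_region((char *)payload + user_size,
			rounded_size - user_size);
}
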
struct rte_memseg_list *msl;
struct malloc_elem *elem = NULL;
size_t alloc_sz;
- int allocd_pages;
+ int allocd_pages, i;
+ bool dirty = false;
void *ret, *map_addr;
alloc_sz = (size_t)pg_sz * n_segs;
goto fail;
}
+ /* Element is dirty if it contains at least one dirty page. */
+ for (i = 0; i < allocd_pages; i++)
+ dirty |= ms[i]->flags & RTE_MEMSEG_FLAG_DIRTY;
+
/* add newly minted memsegs to malloc heap */
- elem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz);
+ elem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz, dirty);
/* try once more, as now we have allocated new memory */
ret = find_suitable_element(heap, elt_size, flags, align, bound,
return ret;
}
+static unsigned int
+malloc_get_numa_socket(void)
+{
+ const struct internal_config *conf = eal_get_internal_configuration();
+ unsigned int socket_id = rte_socket_id();
+ unsigned int idx;
+
+ if (socket_id != (unsigned int)SOCKET_ID_ANY)
+ return socket_id;
+
+ /* for control threads, return first socket where memory is available */
+ for (idx = 0; idx < rte_socket_count(); idx++) {
+ socket_id = rte_socket_id_by_idx(idx);
+ if (conf->socket_mem[socket_id] != 0)
+ return socket_id;
+ }
+
+ return rte_socket_id_by_idx(0);
+}
+
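This helper exists for control threads and other non-EAL threads, where
rte_socket_id() returns SOCKET_ID_ANY: rather than failing the allocation,
the heap falls back to the first socket that actually has memory reserved.
A hedged usage sketch (the wrapper is hypothetical; rte_malloc() is the real
entry point that reaches this helper when no socket is specified):

#include <rte_malloc.h>

/* Called from a control thread: no lcore/NUMA affinity, so the heap
 * resolves the target socket via malloc_get_numa_socket() above. */
static void *
control_thread_alloc(size_t len)
{
	return rte_malloc(NULL, len, 0);
}
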
void *
malloc_heap_alloc(const char *type, size_t size, int socket_arg,
unsigned int flags, size_t align, size_t bound, bool contig)
struct rte_memseg_list *msl;
unsigned int i, n_segs, before_space, after_space;
int ret;
+ bool unmapped = false;
const struct internal_config *internal_conf =
eal_get_internal_configuration();
if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
return -1;
+ asan_clear_redzone(elem);
+
/* elem may be merged with previous element, so keep heap address */
heap = elem->heap;
msl = elem->msl;
rte_spinlock_lock(&(heap->lock));
+ void *asan_ptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN + elem->pad);
+ size_t asan_data_len = elem->size - MALLOC_ELEM_OVERHEAD - elem->pad;
+
/* mark element as free */
elem->state = ELEM_FREE;
request_to_primary(&req);
}
+ /* we didn't exit early, meaning we have unmapped some pages */
+ unmapped = true;
+
RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zuMB\n",
msl->socket_id, aligned_len >> 20ULL);
rte_mcfg_mem_write_unlock();
free_unlock:
+ asan_set_freezone(asan_ptr, asan_data_len);
+
+ /* if we unmapped some memory, we need to do additional work for ASan */
+ if (unmapped) {
+ void *asan_end = RTE_PTR_ADD(asan_ptr, asan_data_len);
+ void *aligned_end = RTE_PTR_ADD(aligned_start, aligned_len);
+ void *aligned_trailer = RTE_PTR_SUB(aligned_start,
+ MALLOC_ELEM_TRAILER_LEN);
+
+ /*
+ * There was a memory area that was unmapped. This memory area
+ * will have to be marked as available for ASan, because we will
+ * want to use it next time it gets mapped again. The OS memory
+ * protection should trigger a fault on access to these areas
+ * anyway, so we are not giving up any protection.
+ */
+ asan_set_zone(aligned_start, aligned_len, 0x00);
+
+ /*
+ * ...however, when we unmap pages, we create new free elements
+ * which might have been marked as "freed" with an earlier
+ * `asan_set_freezone` call. So, if there is an area past the
+ * unmapped space that was marked as freezone for ASan, we need
+ * to mark the malloc header as available.
+ */
+ if (asan_end > aligned_end)
+ asan_set_zone(aligned_end, MALLOC_ELEM_HEADER_LEN, 0x00);
+
+ /* if there's space before unmapped memory, mark as available */
+ if (asan_ptr < aligned_start)
+ asan_set_zone(aligned_trailer, MALLOC_ELEM_TRAILER_LEN, 0x00);
+ }
+
rte_spinlock_unlock(&(heap->lock));
return ret;
}
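
The 0x00 written by asan_set_zone() is the shadow value meaning "all eight
bytes addressable". Under the default ASan mapping, every 8-byte granule of
application memory is described by one shadow byte located at
(addr >> 3) + offset. A minimal sketch of that mapping, assuming a
hypothetical SHADOW_OFFSET constant (the real value is platform-specific,
and DPDK's actual helper is asan_set_zone() in malloc_elem.h):

#include <stdint.h>
#include <stddef.h>

#define SHADOW_OFFSET 0x7fff8000ULL	/* illustrative Linux x86-64 value */

static void
shadow_set_zone(void *ptr, size_t len, uint8_t val)
{
	uint8_t *shadow = (uint8_t *)(((uintptr_t)ptr >> 3) + SHADOW_OFFSET);
	size_t i;

	/* one shadow byte covers an 8-byte granule; 0x00 == addressable */
	for (i = 0; i < len / 8; i++)
		shadow[i] = val;
}
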
memset(msl->base_va, 0, msl->len);
/* now, add newly minted memory to the malloc heap */
- malloc_heap_add_memory(heap, msl, msl->base_va, msl->len);
+ malloc_heap_add_memory(heap, msl, msl->base_va, msl->len, false);
heap->total_size += msl->len;
/* add all IOVA-contiguous areas to the heap */
return rte_memseg_contig_walk(malloc_add_seg, NULL);
}
+
+void
+rte_eal_malloc_heap_cleanup(void)
+{
+ unregister_mp_requests();
+}
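
rte_eal_malloc_heap_cleanup() undoes the multiprocess request registration
performed when the heap was initialized, so the sync handlers do not leak
across EAL teardown; it is meant to be invoked from rte_eal_cleanup(). A
typical application-level pairing (a sketch with error handling trimmed):

#include <rte_eal.h>

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* ... application work ... */

	/* tears down EAL state, including the malloc heap's multiprocess
	 * handlers unregistered by the function above */
	return rte_eal_cleanup();
}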