#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
-#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>
#include <rte_errno.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
-#include <rte_launch.h>
-#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
-#include <rte_memcpy.h>
#include <rte_memzone.h>
-#include <rte_atomic.h>
#include <rte_fbarray.h>
#include "eal_internal_cfg.h"
*/
static struct malloc_elem *
malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,
- void *start, size_t len)
+ void *start, size_t len, bool dirty)
{
struct malloc_elem *elem = start;
- malloc_elem_init(elem, heap, msl, len, elem, len);
+ malloc_elem_init(elem, heap, msl, len, elem, len, dirty);
malloc_elem_insert(elem);
found_msl = &mcfg->memsegs[msl_idx];
- malloc_heap_add_memory(heap, found_msl, ms->addr, len);
+ malloc_heap_add_memory(heap, found_msl, ms->addr, len,
+ ms->flags & RTE_MEMSEG_FLAG_DIRTY);
heap->total_size += len;
struct rte_memseg_list *msl;
struct malloc_elem *elem = NULL;
size_t alloc_sz;
- int allocd_pages;
+ int allocd_pages, i;
+ bool dirty = false;
void *ret, *map_addr;
alloc_sz = (size_t)pg_sz * n_segs;
goto fail;
}
+ /* Element is dirty if it contains at least one dirty page. */
+ for (i = 0; i < allocd_pages; i++)
+ dirty |= ms[i]->flags & RTE_MEMSEG_FLAG_DIRTY;
+
/* add newly minted memsegs to malloc heap */
- elem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz);
+ elem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz, dirty);
/* try once more, as now we have allocated new memory */
ret = find_suitable_element(heap, elt_size, flags, align, bound,
struct rte_memseg_list *msl;
unsigned int i, n_segs, before_space, after_space;
int ret;
+ bool unmapped = false;
const struct internal_config *internal_conf =
eal_get_internal_configuration();
request_to_primary(&req);
}
+ /* we didn't exit early, meaning we have unmapped some pages */
+ unmapped = true;
+
RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
msl->socket_id, aligned_len >> 20ULL);
free_unlock:
asan_set_freezone(asan_ptr, asan_data_len);
+ /* if we unmapped some memory, we need to do additional work for ASan */
+ if (unmapped) {
+ void *asan_end = RTE_PTR_ADD(asan_ptr, asan_data_len);
+ void *aligned_end = RTE_PTR_ADD(aligned_start, aligned_len);
+ void *aligned_trailer = RTE_PTR_SUB(aligned_start,
+ MALLOC_ELEM_TRAILER_LEN);
+
+ /*
+ * There was a memory area that was unmapped. This memory area
+ * will have to be marked as available for ASan, because we will
+ * want to use it next time it gets mapped again. The OS memory
+ * protection should trigger a fault on access to these areas
+ * anyway, so we are not giving up any protection.
+ */
+ asan_set_zone(aligned_start, aligned_len, 0x00);
+
+ /*
+ * ...however, when we unmap pages, we create new free elements
+ * which might have been marked as "freed" with an earlier
+ * `asan_set_freezone` call. So, if there is an area past the
+ * unmapped space that was marked as freezone for ASan, we need
+ * to mark the malloc header as available.
+ */
+ if (asan_end > aligned_end)
+ asan_set_zone(aligned_end, MALLOC_ELEM_HEADER_LEN, 0x00);
+
+ /* if there's space before unmapped memory, mark as available */
+ if (asan_ptr < aligned_start)
+ asan_set_zone(aligned_trailer, MALLOC_ELEM_TRAILER_LEN, 0x00);
+ }
+
rte_spinlock_unlock(&(heap->lock));
return ret;
}
memset(msl->base_va, 0, msl->len);
/* now, add newly minted memory to the malloc heap */
- malloc_heap_add_memory(heap, msl, msl->base_va, msl->len);
+ malloc_heap_add_memory(heap, msl, msl->base_va, msl->len, false);
heap->total_size += msl->len;
/* add all IOVA-contiguous areas to the heap */
return rte_memseg_contig_walk(malloc_add_seg, NULL);
}
+
+/*
+ * Tear down the malloc-heap multiprocess machinery.
+ * NOTE(review): only visible effect is unregistering the MP request
+ * handlers (counterpart to the registration done at heap init) —
+ * presumably called once during rte_eal_cleanup(); confirm against
+ * the caller before relying on any stronger cleanup guarantee.
+ */
+void
+rte_eal_malloc_heap_cleanup(void)
+{
+	unregister_mp_requests();
+}