#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
-#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>
#include <rte_errno.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
-#include <rte_launch.h>
-#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
-#include <rte_memcpy.h>
#include <rte_memzone.h>
-#include <rte_atomic.h>
#include <rte_fbarray.h>
#include "eal_internal_cfg.h"
@@ ... @@
struct rte_memseg_list *msl;
unsigned int i, n_segs, before_space, after_space;
int ret;
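+ /* whether pages were unmapped, so the free path can fix up ASan zones */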
+ bool unmapped = false;
const struct internal_config *internal_conf =
eal_get_internal_configuration();
@@ ... @@
request_to_primary(&req);
}
+ /* we didn't exit early, meaning we have unmapped some pages */
+ unmapped = true;
+
RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
msl->socket_id, aligned_len >> 20ULL);
free_unlock:
asan_set_freezone(asan_ptr, asan_data_len);
+ /* if we unmapped some memory, we need to do additional work for ASan */
+ if (unmapped) {
+ void *asan_end = RTE_PTR_ADD(asan_ptr, asan_data_len);
+ void *aligned_end = RTE_PTR_ADD(aligned_start, aligned_len);
+ void *aligned_trailer = RTE_PTR_SUB(aligned_start,
+ MALLOC_ELEM_TRAILER_LEN);
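+ /*
+ * asan_end marks the end of the freezone set just above; aligned_end
+ * marks the end of the unmapped area; aligned_trailer is where the
+ * trailer of the element preceding the unmapped area (if any) sits.
+ */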
+
+ /*
+ * There was a memory area that was unmapped. This memory area
+ * will have to be marked as available for ASan, because we will
+ * want to use it next time it gets mapped again. The OS memory
+ * protection should trigger a fault on access to these areas
+ * anyway, so we are not giving up any protection.
+ */
+ asan_set_zone(aligned_start, aligned_len, 0x00);
+
+ /*
+ * ...however, unmapping pages splits off new free elements, and
+ * their headers may lie within an area that an earlier
+ * `asan_set_freezone` call marked as "freed". So, if there is an
+ * area past the unmapped space that was marked as freezone for
+ * ASan, we need to mark the malloc header as available.
+ */
+ if (asan_end > aligned_end)
+ asan_set_zone(aligned_end, MALLOC_ELEM_HEADER_LEN, 0x00);
+
+ /* if there's space before unmapped memory, mark as available */
+ if (asan_ptr < aligned_start)
+ asan_set_zone(aligned_trailer, MALLOC_ELEM_TRAILER_LEN, 0x00);
+ }
+
rte_spinlock_unlock(&(heap->lock));
return ret;
}
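The shadow-marking sequence described in the comments above can be reproduced
with ASan's public poisoning interface. The sketch below is not DPDK code: it
assumes a build with -fsanitize=address and uses the
<sanitizer/asan_interface.h> macros to stand in for DPDK's internal
asan_set_freezone()/asan_set_zone() helpers.

/* Mark an area as freed, then make it addressable again before reuse,
 * mirroring what asan_set_zone(..., 0x00) does for pages that were
 * unmapped and may be mapped again later.
 */
#include <stdlib.h>
#include <string.h>
#include <sanitizer/asan_interface.h>

int main(void)
{
	char *area = malloc(4096);

	if (area == NULL)
		return 1;

	/* analogous to asan_set_freezone(): accesses now trip ASan */
	ASAN_POISON_MEMORY_REGION(area, 4096);

	/* analogous to asan_set_zone(area, 4096, 0x00): make the area
	 * addressable again; for truly unmapped pages, the OS page fault
	 * provides the protection instead */
	ASAN_UNPOISON_MEMORY_REGION(area, 4096);

	memset(area, 0, 4096);	/* valid again */
	free(area);
	return 0;
}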