This patch adds the necessary hooks for ASan in the memory allocator.
This feature is currently available in DPDK only on Linux x86_64.
If other OS/architectures want to support it, ASAN_SHADOW_OFFSET must be
defined and RTE_MALLOC_ASAN must be set accordingly in meson.
Signed-off-by: Xueqin Lin <xueqin.lin@intel.com>
Signed-off-by: Zhihong Peng <zhihongx.peng@intel.com>
Acked-by: Anatoly Burakov <anatoly.burakov@intel.com>
add_project_link_arguments('-lasan', language: 'c')
dpdk_extra_ldflags += '-lasan'
endif
+
+ if is_linux and arch_subdir == 'x86' and dpdk_conf.get('RTE_ARCH_64')
+ dpdk_conf.set10('RTE_MALLOC_ASAN', true)
+ endif
endif
if get_option('default_library') == 'both'
- The libasan package must be installed when compiling with gcc in Centos/RHEL.
- If the program is tested using cmdline, you may need to execute the
"stty echo" command when an error occurs.
+
+ASan is aware of DPDK memory allocations, thanks to added instrumentation.
+This is only enabled on x86_64 at the moment.
+Other architectures may have to define ASAN_SHADOW_OFFSET.
+
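+Porting ASan support to another OS or architecture requires defining
+``ASAN_SHADOW_OFFSET`` alongside the existing x86_64 definition and setting
+``RTE_MALLOC_ASAN`` in meson. A purely illustrative sketch follows; the
+architecture and offset value are placeholders and must match the shadow
+offset used by that platform's ASan runtime::
+
+ /* hypothetical example, not a supported configuration */
+ #ifdef RTE_ARCH_ARM64
+ #define ASAN_SHADOW_OFFSET 0x001000000000 /* placeholder value */
+ #endif
+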
+Example heap-buffer-overflow error
+----------------------------------
+
+Add the following unit test code to examples/helloworld/main.c::
+
+ char *p = rte_zmalloc(NULL, 9, 0);
+ if (!p) {
+ printf("rte_zmalloc error.\n");
+ return -1;
+ }
+ p[9] = 'a';
+
+The above code allocates 9 bytes of memory but accesses the tenth byte. With ASan enabled, this results in a heap-buffer-overflow error, with a detailed log like the following::
+
+ ==369953==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x7fb17f465809 at pc 0x5652e6707b84 bp 0x7ffea70eea20 sp 0x7ffea70eea10 WRITE of size 1 at 0x7fb17f465809 thread T0
+ #0 0x5652e6707b83 in main ../examples/helloworld/main.c:47
+ #1 0x7fb94953c0b2 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x270b2)
+ #2 0x5652e67079bd in _start (/home/pzh/asan_test/x86_64-native-linuxapp-gcc/examples/dpdk-helloworld+0x8329bd)
+
+ Address 0x7fb17f465809 is a wild pointer.
+ SUMMARY: AddressSanitizer: heap-buffer-overflow ../examples/helloworld/main.c:47 in main
+
+Note::
+
+ - Some ASan features (for example, reporting where the offending memory was
+   originally allocated; such addresses are currently shown as a wild pointer)
+   are not yet supported for DPDK allocations.
+
+Example use-after-free error
+----------------------------
+
+Add the following unit test code to examples/helloworld/main.c::
+
+ char *p = rte_zmalloc(NULL, 9, 0);
+ if (!p) {
+ printf("rte_zmalloc error.\n");
+ return -1;
+ }
+ rte_free(p);
+ *p = 'a';
+
+The above code allocates 9 bytes of memory, frees them, and then accesses the first byte after release. With ASan enabled, this results in a heap-use-after-free error, with a detailed log like the following::
+
+ ==417048==ERROR: AddressSanitizer: heap-use-after-free on address 0x7fc83f465800 at pc 0x564308a39b89 bp 0x7ffc8c85bf50 sp 0x7ffc8c85bf40 WRITE of size 1 at 0x7fc83f465800 thread T0
+ #0 0x564308a39b88 in main ../examples/helloworld/main.c:48
+ #1 0x7fd0079c60b2 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x270b2)
+ #2 0x564308a399bd in _start (/home/pzh/asan_test/x86_64-native-linuxapp-gcc/examples/dpdk-helloworld+0x8329bd)
+
+ Address 0x7fc83f465800 is a wild pointer.
+ SUMMARY: AddressSanitizer: heap-use-after-free ../examples/helloworld/main.c:48 in main
struct malloc_elem *new_free_elem =
RTE_PTR_ADD(new_elem, size + MALLOC_ELEM_OVERHEAD);
+ asan_clear_split_alloczone(new_free_elem);
+
split_elem(elem, new_free_elem);
malloc_elem_free_list_insert(new_free_elem);
elem->state = ELEM_BUSY;
elem->pad = old_elem_size;
+ asan_clear_alloczone(elem);
+
/* put a dummy header in padding, to point to real element header */
if (elem->pad > 0) { /* pad will be at least 64-bytes, as everything
* is cache-line aligned */
return new_elem;
}
+ asan_clear_split_alloczone(new_elem);
+
/* we are going to split the element in two. The original element
* remains free, and the new element is the one allocated.
* Re-insert original element, in case its new size makes it
* belong on a different list.
*/
+
split_elem(elem, new_elem);
+
+ asan_clear_alloczone(new_elem);
+
new_elem->state = ELEM_BUSY;
malloc_elem_free_list_insert(elem);
if (next && next_elem_is_adjacent(elem)) {
len_after = RTE_PTR_DIFF(next, hide_end);
if (len_after >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+ asan_clear_split_alloczone(hide_end);
+
/* split after */
split_elem(elem, hide_end);
if (prev && prev_elem_is_adjacent(elem)) {
len_before = RTE_PTR_DIFF(hide_start, elem);
if (len_before >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+ asan_clear_split_alloczone(hide_start);
+
/* split before */
split_elem(elem, hide_start);
}
}
+ asan_clear_alloczone(elem);
+
remove_elem(elem);
}
const size_t new_size = size + elem->pad + MALLOC_ELEM_OVERHEAD;
/* if we request a smaller size, then always return ok */
- if (elem->size >= new_size)
+ if (elem->size >= new_size) {
+ asan_clear_alloczone(elem);
return 0;
+ }
/* check if there is a next element, it's free and adjacent */
if (!elem->next || elem->next->state != ELEM_FREE ||
/* now we have a big block together. Lets cut it down a bit, by splitting */
struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
+
+ asan_clear_split_alloczone(split_pt);
+
split_elem(elem, split_pt);
malloc_elem_free_list_insert(split_pt);
}
+
+ asan_clear_alloczone(elem);
+
return 0;
}
uint64_t header_cookie; /* Cookie marking start of data */
/* trailer cookie at start + size */
#endif
+#ifdef RTE_MALLOC_ASAN
+ size_t user_size; /* size requested by the user */
+ uint64_t asan_cookie[2]; /* must be next to header_cookie */
+#endif
} __rte_cache_aligned;
+static const unsigned int MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem);
+
#ifndef RTE_MALLOC_DEBUG
-static const unsigned MALLOC_ELEM_TRAILER_LEN = 0;
+#ifdef RTE_MALLOC_ASAN
+static const unsigned int MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;
+#else
+static const unsigned int MALLOC_ELEM_TRAILER_LEN;
+#endif
/* dummy function - just check if pointer is non-null */
static inline int
#else
-static const unsigned MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;
+static const unsigned int MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;
#define MALLOC_HEADER_COOKIE 0xbadbadbadadd2e55ULL /**< Header cookie. */
#define MALLOC_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/
#endif
-static const unsigned MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem);
#define MALLOC_ELEM_OVERHEAD (MALLOC_ELEM_HEADER_LEN + MALLOC_ELEM_TRAILER_LEN)
+#ifdef RTE_MALLOC_ASAN
+
+#ifdef RTE_ARCH_X86_64
+#define ASAN_SHADOW_OFFSET 0x00007fff8000
+#endif
+
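+/*
+ * Each ASAN_SHADOW_GRAIN_SIZE (8) byte chunk of application memory is
+ * tracked by one shadow byte at (addr >> ASAN_SHADOW_SCALE) + ASAN_SHADOW_OFFSET.
+ * A shadow value of 0 means the whole chunk is accessible; ASAN_MEM_FREE_FLAG
+ * and ASAN_MEM_REDZONE_FLAG mark freed memory and redzones around allocations.
+ */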
+#define ASAN_SHADOW_GRAIN_SIZE 8
+#define ASAN_MEM_FREE_FLAG 0xfd
+#define ASAN_MEM_REDZONE_FLAG 0xfa
+#define ASAN_SHADOW_SCALE 3
+
+#define ASAN_MEM_SHIFT(mem) ((void *)((uintptr_t)(mem) >> ASAN_SHADOW_SCALE))
+#define ASAN_MEM_TO_SHADOW(mem) \
+ RTE_PTR_ADD(ASAN_MEM_SHIFT(mem), ASAN_SHADOW_OFFSET)
+
+#if defined(__clang__)
+#define __rte_no_asan __attribute__((no_sanitize("address", "hwaddress")))
+#else
+#define __rte_no_asan __attribute__((no_sanitize_address))
+#endif
+
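+/* The write to the shadow map itself must not be instrumented by ASan. */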
+__rte_no_asan
+static inline void
+asan_set_shadow(void *addr, char val)
+{
+ *(char *)addr = val;
+}
+
+static inline void
+asan_set_zone(void *ptr, size_t len, uint32_t val)
+{
+ size_t offset, i;
+ void *shadow;
+ size_t zone_len = len / ASAN_SHADOW_GRAIN_SIZE;
+ if (len % ASAN_SHADOW_GRAIN_SIZE != 0)
+ zone_len += 1;
+
+ for (i = 0; i < zone_len; i++) {
+ offset = i * ASAN_SHADOW_GRAIN_SIZE;
+ shadow = ASAN_MEM_TO_SHADOW((uintptr_t)ptr + offset);
+ asan_set_shadow(shadow, val);
+ }
+}
+
+/*
+ * When the memory is released, the release mark is
+ * set in the corresponding range of the shadow area.
+ */
+static inline void
+asan_set_freezone(void *ptr, size_t size)
+{
+ asan_set_zone(ptr, size, ASAN_MEM_FREE_FLAG);
+}
+
+/*
+ * When the memory is allocated, its state must be set to accessible.
+ */
+static inline void
+asan_clear_alloczone(struct malloc_elem *elem)
+{
+ asan_set_zone((void *)elem, elem->size, 0x0);
+}
+
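+/*
+ * Make the area where a new element's header (and the preceding trailer)
+ * will be written accessible, before split_elem() stores the metadata there.
+ */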
+static inline void
+asan_clear_split_alloczone(struct malloc_elem *elem)
+{
+ void *ptr = RTE_PTR_SUB(elem, MALLOC_ELEM_TRAILER_LEN);
+ asan_set_zone(ptr, MALLOC_ELEM_OVERHEAD, 0x0);
+}
+
+/*
+ * When the memory is allocated, the memory boundary is
+ * marked in the corresponding range of the shadow area.
+ * Requirement: the redzone is at least 16 bytes and a power of two.
+ */
+static inline void
+asan_set_redzone(struct malloc_elem *elem, size_t user_size)
+{
+ uintptr_t head_redzone;
+ uintptr_t tail_redzone;
+ void *front_shadow;
+ void *tail_shadow;
+ uint32_t val;
+
+ if (elem != NULL) {
+ if (elem->state != ELEM_PAD)
+ elem = RTE_PTR_ADD(elem, elem->pad);
+
+ elem->user_size = user_size;
+
+ /* Set mark before the start of the allocated memory */
+ head_redzone = (uintptr_t)RTE_PTR_ADD(elem,
+ MALLOC_ELEM_HEADER_LEN - ASAN_SHADOW_GRAIN_SIZE);
+ front_shadow = ASAN_MEM_TO_SHADOW(head_redzone);
+ asan_set_shadow(front_shadow, ASAN_MEM_REDZONE_FLAG);
+ front_shadow = ASAN_MEM_TO_SHADOW(head_redzone
+ - ASAN_SHADOW_GRAIN_SIZE);
+ asan_set_shadow(front_shadow, ASAN_MEM_REDZONE_FLAG);
+
+ /* Set mark after the end of the allocated memory */
+ tail_redzone = (uintptr_t)RTE_PTR_ADD(elem,
+ MALLOC_ELEM_HEADER_LEN
+ + elem->user_size);
+ tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone);
+ val = (tail_redzone % ASAN_SHADOW_GRAIN_SIZE);
+ val = (val == 0) ? ASAN_MEM_REDZONE_FLAG : val;
+ asan_set_shadow(tail_shadow, val);
+ tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone
+ + ASAN_SHADOW_GRAIN_SIZE);
+ asan_set_shadow(tail_shadow, ASAN_MEM_REDZONE_FLAG);
+ }
+}
+
+/*
+ * When the memory is released, the mark of the memory boundary
+ * in the corresponding range of the shadow area is cleared.
+ * Requirement: the redzone is at least 16 bytes and a power of two.
+ */
+static inline void
+asan_clear_redzone(struct malloc_elem *elem)
+{
+ uintptr_t head_redzone;
+ uintptr_t tail_redzone;
+ void *head_shadow;
+ void *tail_shadow;
+
+ if (elem != NULL) {
+ elem = RTE_PTR_ADD(elem, elem->pad);
+
+ /* Clear mark before the start of the allocated memory */
+ head_redzone = (uintptr_t)RTE_PTR_ADD(elem,
+ MALLOC_ELEM_HEADER_LEN - ASAN_SHADOW_GRAIN_SIZE);
+ head_shadow = ASAN_MEM_TO_SHADOW(head_redzone);
+ asan_set_shadow(head_shadow, 0x00);
+ head_shadow = ASAN_MEM_TO_SHADOW(head_redzone
+ - ASAN_SHADOW_GRAIN_SIZE);
+ asan_set_shadow(head_shadow, 0x00);
+
+ /* Clear mark after the end of the allocated memory */
+ tail_redzone = (uintptr_t)RTE_PTR_ADD(elem,
+ MALLOC_ELEM_HEADER_LEN + elem->user_size);
+ tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone);
+ asan_set_shadow(tail_shadow, 0x00);
+ tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone
+ + ASAN_SHADOW_GRAIN_SIZE);
+ asan_set_shadow(tail_shadow, 0x00);
+ }
+}
+
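+/*
+ * Return the size originally requested by the caller, so that rte_realloc()
+ * copies only the bytes that are accessible under ASan.
+ */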
+static inline size_t
+old_malloc_size(struct malloc_elem *elem)
+{
+ if (elem->state != ELEM_PAD)
+ elem = RTE_PTR_ADD(elem, elem->pad);
+
+ return elem->user_size;
+}
+
+#else /* !RTE_MALLOC_ASAN */
+
+#define __rte_no_asan
+
+static inline void
+asan_set_freezone(void *ptr __rte_unused, size_t size __rte_unused) { }
+
+static inline void
+asan_clear_alloczone(struct malloc_elem *elem __rte_unused) { }
+
+static inline void
+asan_clear_split_alloczone(struct malloc_elem *elem __rte_unused) { }
+
+static inline void
+asan_set_redzone(struct malloc_elem *elem __rte_unused,
+ size_t user_size __rte_unused) { }
+
+static inline void
+asan_clear_redzone(struct malloc_elem *elem __rte_unused) { }
+
+static inline size_t
+old_malloc_size(struct malloc_elem *elem)
+{
+ return elem->size - elem->pad - MALLOC_ELEM_OVERHEAD;
+}
+#endif /* !RTE_MALLOC_ASAN */
+
/*
* Given a pointer to the start of a memory block returned by malloc, get
* the actual malloc_elem header for that block.
unsigned int flags, size_t align, size_t bound, bool contig)
{
struct malloc_elem *elem;
+ size_t user_size = size;
size = RTE_CACHE_LINE_ROUNDUP(size);
align = RTE_CACHE_LINE_ROUNDUP(align);
/* increase heap's count of allocated elements */
heap->alloc_count++;
+
+ asan_set_redzone(elem, user_size);
}
return elem == NULL ? NULL : (void *)(&elem[1]);
/* increase heap's count of allocated elements */
heap->alloc_count++;
+
+ asan_set_redzone(elem, size);
}
return elem == NULL ? NULL : (void *)(&elem[1]);
if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
return -1;
+ asan_clear_redzone(elem);
+
/* elem may be merged with previous element, so keep heap address */
heap = elem->heap;
msl = elem->msl;
rte_spinlock_lock(&(heap->lock));
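+ /* save the user data region now: the element may be merged while freed */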
+ void *asan_ptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN + elem->pad);
+ size_t asan_data_len = elem->size - MALLOC_ELEM_OVERHEAD - elem->pad;
+
/* mark element as free */
elem->state = ELEM_FREE;
rte_mcfg_mem_write_unlock();
free_unlock:
+ asan_set_freezone(asan_ptr, asan_data_len);
+
rte_spinlock_unlock(&(heap->lock));
return ret;
}
void *
rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
{
+ size_t user_size;
+
if (ptr == NULL)
return rte_malloc_socket(NULL, size, align, socket);
return NULL;
}
+ user_size = size;
+
size = RTE_CACHE_LINE_ROUNDUP(size), align = RTE_CACHE_LINE_ROUNDUP(align);
/* check requested socket id and alignment matches first, and if ok,
RTE_PTR_ALIGN(ptr, align) == ptr &&
malloc_heap_resize(elem, size) == 0) {
rte_eal_trace_mem_realloc(size, align, socket, ptr);
+
+ asan_set_redzone(elem, user_size);
+
return ptr;
}
if (new_ptr == NULL)
return NULL;
/* elem: |pad|data_elem|data|trailer| */
- const size_t old_size = elem->size - elem->pad - MALLOC_ELEM_OVERHEAD;
+ const size_t old_size = old_malloc_size(elem);
rte_memcpy(new_ptr, ptr, old_size < size ? old_size : size);
rte_free(ptr);