In that case, the pad header is used to locate the actual malloc element
header for the block.
+* dirty - this flag is only meaningful when ``state`` is ``FREE``.
+ It indicates that the content of the element is not fully zero-filled.
+ Memory from such blocks must be cleared when it is allocated via ``rte_zmalloc*()``.
+
* pad - this holds the length of the padding present at the start of the block.
In the case of a normal block header, it is added to the address of the end
of the header to give the address of the start of the data area, i.e. the
void
malloc_elem_init(struct malloc_elem *elem, struct malloc_heap *heap,
struct rte_memseg_list *msl, size_t size,
- struct malloc_elem *orig_elem, size_t orig_size)
+ struct malloc_elem *orig_elem, size_t orig_size, bool dirty)
{
elem->heap = heap;
elem->msl = msl;
elem->next = NULL;
memset(&elem->free_list, 0, sizeof(elem->free_list));
elem->state = ELEM_FREE;
+ elem->dirty = dirty;
elem->size = size;
elem->pad = 0;
elem->orig_elem = orig_elem;
const size_t new_elem_size = elem->size - old_elem_size;
malloc_elem_init(split_pt, elem->heap, elem->msl, new_elem_size,
- elem->orig_elem, elem->orig_size);
+ elem->orig_elem, elem->orig_size, elem->dirty);
split_pt->prev = elem;
split_pt->next = next_elem;
if (next_elem)
else
elem1->heap->last = elem1;
elem1->next = next;
+ elem1->dirty |= elem2->dirty;
if (elem1->pad) {
struct malloc_elem *inner = RTE_PTR_ADD(elem1, elem1->pad);
inner->size = elem1->size - elem1->pad;
ptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN);
data_len = elem->size - MALLOC_ELEM_OVERHEAD;
+ /*
+ * Consider the element clean for the purposes of joining.
+ * If both neighbors are clean or non-existent,
+ * the joint element will be clean,
+ * which means its memory must be cleared below.
+ * There is no need to clear the memory if the joint element is dirty.
+ */
+ elem->dirty = false;
elem = malloc_elem_join_adjacent_free(elem);
malloc_elem_free_list_insert(elem);
/* decrease heap's count of allocated elements */
elem->heap->alloc_count--;
- /* poison memory */
+#ifndef RTE_MALLOC_DEBUG
+ /* Normally, clear the memory unless the joint element is dirty. */
+ if (!elem->dirty)
+ memset(ptr, 0, data_len);
+#else
+ /* Always poison the memory in debug mode. */
memset(ptr, MALLOC_POISON, data_len);
+#endif
return elem;
}
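
Taken together, the free path keeps a simple invariant: the freed element starts clean, dirtiness is OR-ed in from every merged neighbor, and the data is zeroed only when the joint element ends up clean. A minimal standalone model of that rule (hypothetical helper, not part of the patch)::

    #include <stdbool.h>
    #include <string.h>

    /*
     * Hypothetical model of the free path above: the freed element
     * is considered clean, each merged neighbor OR-s its dirty bit
     * in (mirroring "elem1->dirty |= elem2->dirty"), and the data
     * is cleared only if the joint element is clean.
     */
    static void
    model_free(void *data, size_t len, bool prev_dirty, bool next_dirty)
    {
        bool joint_dirty = false;  /* the freed element starts clean */

        joint_dirty |= prev_dirty; /* join with the previous neighbor */
        joint_dirty |= next_dirty; /* join with the next neighbor */

        if (!joint_dirty)
            memset(data, 0, len);  /* keep clean elements zero-filled */
    }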
LIST_ENTRY(malloc_elem) free_list;
/**< list of free elements in heap */
struct rte_memseg_list *msl;
- volatile enum elem_state state;
+ /** Element state; validity of @c dirty and @c pad depends on it. */
+ /* An extra bit is needed in case the bit-field is treated as signed int. */
+ enum elem_state state : 3;
+ /** If state == ELEM_FREE: the memory is not filled with zeroes. */
+ uint32_t dirty : 1;
+ /** Reserved for future use. */
+ uint32_t reserved : 28;
uint32_t pad;
size_t size;
struct malloc_elem *orig_elem;
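
The "extra bit" comment is easiest to see in isolation: the signedness of an enum bit-field is implementation-defined in C, so a 2-bit field may hold only -2..1 and would mangle a state value of 2. A standalone sketch, with demo names mirroring ``ELEM_FREE``/``ELEM_BUSY``/``ELEM_PAD`` and their values as assumed from ``malloc_elem.h``::

    /* Why "state : 3" rather than "state : 2". */
    enum demo_state {
        DEMO_FREE = 0,
        DEMO_BUSY = 1,
        DEMO_PAD  = 2  /* needs 2 value bits, plus a sign bit if signed */
    };

    struct width_demo {
        /*
         * If the compiler treats this bit-field as signed, its range
         * is -2..1, and storing DEMO_PAD (2) would read back as -2.
         */
        enum demo_state narrow : 2;
        /* One extra bit keeps 0..2 representable even when signed. */
        enum demo_state wide : 3;
    };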
struct rte_memseg_list *msl,
size_t size,
struct malloc_elem *orig_elem,
- size_t orig_size);
+ size_t orig_size,
+ bool dirty);
void
malloc_elem_insert(struct malloc_elem *elem);
*/
static struct malloc_elem *
malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,
- void *start, size_t len)
+ void *start, size_t len, bool dirty)
{
struct malloc_elem *elem = start;
- malloc_elem_init(elem, heap, msl, len, elem, len);
+ malloc_elem_init(elem, heap, msl, len, elem, len, dirty);
malloc_elem_insert(elem);
found_msl = &mcfg->memsegs[msl_idx];
- malloc_heap_add_memory(heap, found_msl, ms->addr, len);
+ malloc_heap_add_memory(heap, found_msl, ms->addr, len,
+ ms->flags & RTE_MEMSEG_FLAG_DIRTY);
heap->total_size += len;
struct rte_memseg_list *msl;
struct malloc_elem *elem = NULL;
size_t alloc_sz;
- int allocd_pages;
+ int allocd_pages, i;
+ bool dirty = false;
void *ret, *map_addr;
alloc_sz = (size_t)pg_sz * n_segs;
goto fail;
}
+ /* Element is dirty if it contains at least one dirty page. */
+ for (i = 0; i < allocd_pages; i++)
+ dirty |= ms[i]->flags & RTE_MEMSEG_FLAG_DIRTY;
+
/* add newly minted memsegs to malloc heap */
- elem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz);
+ elem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz, dirty);
/* try once more, as now we have allocated new memory */
ret = find_suitable_element(heap, elt_size, flags, align, bound,
memset(msl->base_va, 0, msl->len);
/* now, add newly minted memory to the malloc heap */
- malloc_heap_add_memory(heap, msl, msl->base_va, msl->len);
+ malloc_heap_add_memory(heap, msl, msl->base_va, msl->len, false);
heap->total_size += msl->len;
{
void *ptr = rte_malloc_socket(type, size, align, socket);
+ if (ptr != NULL) {
+ struct malloc_elem *elem = malloc_elem_from_data(ptr);
+
+ if (elem->dirty) {
+ memset(ptr, 0, size);
+ } else {
#ifdef RTE_MALLOC_DEBUG
- /*
- * If DEBUG is enabled, then freed memory is marked with poison
- * value and set to zero on allocation.
- * If DEBUG is not enabled then memory is already zeroed.
- */
- if (ptr != NULL)
- memset(ptr, 0, size);
+ /*
+ * If DEBUG is enabled, then freed memory is marked
+ * with a poison value and set to zero on allocation.
+ * If DEBUG is disabled then memory is already zeroed.
+ */
+ memset(ptr, 0, size);
#endif
+ }
+ }
rte_eal_trace_mem_zmalloc(type, size, align, socket, ptr);
return ptr;
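
From the caller's perspective nothing changes: ``rte_zmalloc*()`` still returns zero-filled memory, whether the element was pre-zeroed on free or cleared on allocation because it was dirty. A minimal usage sketch (the ``alloc_counters`` wrapper is hypothetical)::

    #include <stdint.h>
    #include <rte_malloc.h>

    static uint32_t *
    alloc_counters(unsigned int n)
    {
        /*
         * Guaranteed zero-filled either way: a clean element was
         * already zeroed on free, and a dirty one is cleared by
         * rte_zmalloc_socket() above. Alignment 0 means default.
         */
        return rte_zmalloc("counters", n * sizeof(uint32_t), 0);
    }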
extern "C" {
#endif
+#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_compat.h>
#include <rte_config.h>
#define SOCKET_ID_ANY -1 /**< Any NUMA socket. */
+/** Prevent this segment from being freed back to the OS. */
+#define RTE_MEMSEG_FLAG_DO_NOT_FREE RTE_BIT32(0)
+/** This segment is not filled with zeros. */
+#define RTE_MEMSEG_FLAG_DIRTY RTE_BIT32(1)
+
/**
* Physical memory segment descriptor.
*/
-#define RTE_MEMSEG_FLAG_DO_NOT_FREE (1 << 0)
-/**< Prevent this segment from being freed back to the OS. */
struct rte_memseg {
rte_iova_t iova; /**< Start IO address. */
RTE_STD_C11
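
Outside the allocator, the new flag is read the same way as the existing one. A sketch using the long-standing ``rte_memseg_walk()`` iterator to count segments whose contents are not known to be zero-filled (the callback name and counter are illustrative)::

    #include <rte_memory.h>

    /* rte_memseg_walk() callback: count dirty segments. */
    static int
    count_dirty_cb(const struct rte_memseg_list *msl,
            const struct rte_memseg *ms, void *arg)
    {
        unsigned int *count = arg;

        RTE_SET_USED(msl);
        if (ms->flags & RTE_MEMSEG_FLAG_DIRTY)
            (*count)++;
        return 0; /* non-zero would stop the walk early */
    }

    /* Usage: unsigned int n = 0; rte_memseg_walk(count_dirty_cb, &n); */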