/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stddef.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_memcpy.h>
#include <rte_atomic.h>
55 #include "malloc_elem.h"
56 #include "malloc_heap.h"
58 /* since the memzone size starts with a digit, it will appear unquoted in
59 * rte_config.h, so quote it so it can be passed to rte_str_to_size */
60 #define MALLOC_MEMZONE_SIZE RTE_STR(RTE_MALLOC_MEMZONE_SIZE)
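/*
 * For example, assuming the build-time default of
 * RTE_MALLOC_MEMZONE_SIZE=11M, the macro above expands to the quoted
 * string "11M", which rte_str_to_size() parses as 11 * 1024 * 1024 bytes.
 */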
/*
 * returns the configuration setting for the memzone size as a size_t value
 */
static size_t
get_malloc_memzone_size(void)
{
	return rte_str_to_size(MALLOC_MEMZONE_SIZE);
}
/*
 * reserve an extra memory zone and make it available for use by a particular
 * heap. This reserves the zone and sets a dummy malloc_elem header at the end
 * to prevent overflow. The rest of the zone is added to the free list as a
 * single large free block.
 */
static int
malloc_heap_add_memzone(struct malloc_heap *heap, size_t size, unsigned align)
{
	const unsigned mz_flags = 0;
	const size_t block_size = get_malloc_memzone_size();
	/* ensure the data we want to allocate will fit in the memzone */
	const size_t min_size = size + align + MALLOC_ELEM_OVERHEAD * 2;
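	/* (the two element overheads cover the element's own header and the
	 * end-of-zone sentinel; the extra 'align' bytes guarantee the payload
	 * can be aligned wherever it lands inside the zone) */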
	const struct rte_memzone *mz = NULL;

	size_t mz_size = min_size;
	if (mz_size < block_size)
		mz_size = block_size;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	rte_snprintf(mz_name, sizeof(mz_name), "MALLOC_S%u_HEAP_%u",
			heap->numa_socket, heap->mz_count++);
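	/* e.g. the first zone added to the heap for NUMA socket 0 gets the
	 * name "MALLOC_S0_HEAP_0" */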
	/* try getting a block. if we fail and we don't need as big a block
	 * as given in the config, we can shrink our request and try again
	 */
	do {
		mz = rte_memzone_reserve(mz_name, mz_size,
				heap->numa_socket, mz_flags);
		if (mz == NULL)
			mz_size /= 2;
	} while (mz == NULL && mz_size > min_size);
	if (mz == NULL)
		return -1;
	/* allocate the memory block headers, one at end, one at start */
	struct malloc_elem *start_elem = (struct malloc_elem *)mz->addr;
	struct malloc_elem *end_elem = RTE_PTR_ADD(mz->addr,
			mz_size - MALLOC_ELEM_OVERHEAD);
	end_elem = RTE_PTR_ALIGN_FLOOR(end_elem, CACHE_LINE_SIZE);
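	/* the end sentinel is floored to a cache-line boundary, so the dummy
	 * header marking the end of the zone is itself cache-aligned */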
	const unsigned elem_size = (uintptr_t)end_elem - (uintptr_t)start_elem;
	malloc_elem_init(start_elem, heap, mz, elem_size);
	malloc_elem_mkend(end_elem, start_elem);

	start_elem->next_free = heap->free_head;
	heap->free_head = start_elem;
	/* increase heap total size by size of new memzone */
	heap->total_size += mz_size - MALLOC_ELEM_OVERHEAD;
	return 0;
}
/*
 * initialise a malloc heap object. The heap is locked with a private
 * lock while being initialised. This function should only be called the
 * first time a thread calls malloc - if even then, as heaps are per-socket
 * not per-thread.
 */
static void
malloc_heap_init(struct malloc_heap *heap)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	rte_eal_mcfg_wait_complete(mcfg);
	while (heap->initialised != INITIALISED) {
		if (rte_atomic32_cmpset(
				(volatile uint32_t *)&heap->initialised,
				NOT_INITIALISED, INITIALISING)) {
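			/*
			 * only the thread that wins the NOT_INITIALISED ->
			 * INITIALISING compare-and-set performs the set-up
			 * below; any other caller spins in the while loop
			 * above until the winner stores INITIALISED
			 */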
			heap->free_head = NULL;
			heap->mz_count = 0;
			heap->alloc_count = 0;
			heap->total_size = 0;
			/*
			 * Find NUMA socket of heap that is being initialised,
			 * so that malloc_heaps[n].numa_socket == n
			 */
			heap->numa_socket = heap - mcfg->malloc_heaps;
			rte_spinlock_init(&heap->lock);
			heap->initialised = INITIALISED;
		}
	}
}
/*
 * Iterates through the freelist for a heap to find a free element
 * which can store data of the required size and with the requested alignment.
 * Returns null on failure, or pointer to element on success, with the pointer
 * to the previous element in the list, if any, being returned in a parameter
 * (to make removing the element from the free list faster).
 */
static struct malloc_elem *
find_suitable_element(struct malloc_heap *heap, size_t size,
		unsigned align, struct malloc_elem **prev)
{
	struct malloc_elem *elem, *min_elem, *min_prev;
	size_t min_sz;

	elem = heap->free_head;
	min_elem = NULL;
	min_prev = NULL;
	min_sz = (size_t) SIZE_MAX;
	*prev = NULL;

	while (elem) {
		if (malloc_elem_can_hold(elem, size, align)) {
			if (min_sz > elem->size) {
				min_elem = elem;
				*prev = min_prev;
				min_sz = elem->size;
			}
		}
		min_prev = elem;
		elem = elem->next_free;
	}
	return min_elem;
}
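/*
 * Note: find_suitable_element() implements a best-fit policy - the whole
 * free list is scanned and the smallest element that can hold the request
 * is chosen, rather than the first match.
 */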
/*
 * Main function called by malloc to allocate a block of memory from the
 * heap. It locks the free list and scans it for a suitable element; if
 * that scan fails, a new memzone is added and the list is re-scanned.
 * The lock is released before returning.
 */
void *
malloc_heap_alloc(struct malloc_heap *heap,
		const char *type __attribute__((unused)), size_t size,
		unsigned align)
{
	if (!heap->initialised)
		malloc_heap_init(heap);

	size = CACHE_LINE_ROUNDUP(size);
	align = CACHE_LINE_ROUNDUP(align);
	rte_spinlock_lock(&heap->lock);
	struct malloc_elem *prev, *elem = find_suitable_element(heap,
			size, align, &prev);
	if (elem == NULL) {
		if ((malloc_heap_add_memzone(heap, size, align)) == 0)
			elem = find_suitable_element(heap, size, align, &prev);
	}

	if (elem != NULL) {
		elem = malloc_elem_alloc(elem, size, align, prev);
		/* increase heap's count of allocated elements */
		heap->alloc_count++;
	}
	rte_spinlock_unlock(&heap->lock);
	return elem == NULL ? NULL : (void *)(&elem[1]);
}
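/*
 * Note: on success the pointer returned is &elem[1], i.e. the payload area
 * immediately following the malloc_elem header - this is the address that
 * callers of rte_malloc() ultimately receive.
 */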
/*
 * Function to retrieve data for heap on given socket
 */
int
malloc_heap_get_stats(const struct malloc_heap *heap,
		struct rte_malloc_socket_stats *socket_stats)
{
	if (!heap->initialised)
		return -1;

	struct malloc_elem *elem = heap->free_head;
	/* Initialise variables for heap */
	socket_stats->free_count = 0;
	socket_stats->heap_freesz_bytes = 0;
	socket_stats->greatest_free_size = 0;

	/* Iterate through free list */
	while (elem) {
		socket_stats->free_count++;
		socket_stats->heap_freesz_bytes += elem->size;
		if (elem->size > socket_stats->greatest_free_size)
			socket_stats->greatest_free_size = elem->size;

		elem = elem->next_free;
	}
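	/* allocated size is not tracked directly; it is derived below as the
	 * heap's total size minus the free bytes counted above */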
	/* Get stats on overall heap and allocated memory on this heap */
	socket_stats->heap_totalsz_bytes = heap->total_size;
	socket_stats->heap_allocsz_bytes = (socket_stats->heap_totalsz_bytes -
			socket_stats->heap_freesz_bytes);
	socket_stats->alloc_count = heap->alloc_count;
	return 0;
}