/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_debug.h>
#include <rte_common.h>
#include <rte_spinlock.h>

#include "malloc_elem.h"
#include "malloc_heap.h"
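/* the smallest data block worth splitting off as its own element: used in
 * the split decisions in malloc_elem_alloc() and malloc_elem_resize() below
 */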
#define MIN_DATA_SIZE (RTE_CACHE_LINE_SIZE)
/*
 * initialise a general malloc_elem header structure
 */
void
malloc_elem_init(struct malloc_elem *elem,
		struct malloc_heap *heap, const struct rte_memzone *mz, size_t size)
{
	elem->heap = heap;
	elem->mz = mz;
	elem->prev = NULL;
	memset(&elem->free_list, 0, sizeof(elem->free_list));
	elem->state = ELEM_FREE;
	elem->size = size;
	elem->pad = 0;
	set_header(elem);
	set_trailer(elem);
}
/*
 * initialise a dummy malloc_elem header for the end-of-memzone marker
 */
void
malloc_elem_mkend(struct malloc_elem *elem, struct malloc_elem *prev)
{
	malloc_elem_init(elem, prev->heap, prev->mz, 0);
	elem->prev = prev;
	elem->state = ELEM_BUSY; /* mark busy so it's never merged */
}
/*
 * calculate the starting point of where data of the requested size
 * and alignment would fit in the current element. If the data doesn't
 * fit, return NULL.
 */
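/*
 * Note that placement works backwards from the end of the element: the data
 * block goes as close to the tail as alignment allows, so any slack is left
 * at the front, where the caller can keep it free or record it as padding.
 */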
static void *
elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align)
{
	const uintptr_t end_pt = (uintptr_t)elem +
			elem->size - MALLOC_ELEM_TRAILER_LEN;
	const uintptr_t new_data_start = RTE_ALIGN_FLOOR((end_pt - size), align);
	const uintptr_t new_elem_start = new_data_start - MALLOC_ELEM_HEADER_LEN;

	/* if the new start point is before the existing start, it won't fit */
	return (new_elem_start < (uintptr_t)elem) ? NULL : (void *)new_elem_start;
}
/*
 * use elem_start_pt to determine if we can meet the size and
 * alignment request from the current element
 */
int
malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align)
{
	return elem_start_pt(elem, size, align) != NULL;
}
/*
 * split an existing element into two smaller elements at the given
 * split_pt parameter.
 */
static void
split_elem(struct malloc_elem *elem, struct malloc_elem *split_pt)
{
	struct malloc_elem *next_elem = RTE_PTR_ADD(elem, elem->size);
	const unsigned old_elem_size = (uintptr_t)split_pt - (uintptr_t)elem;
	const unsigned new_elem_size = elem->size - old_elem_size;

	malloc_elem_init(split_pt, elem->heap, elem->mz, new_elem_size);
	split_pt->prev = elem;
	next_elem->prev = split_pt;
	elem->size = old_elem_size;
	set_trailer(elem);
}
/*
 * Given an element size, compute its freelist index.
 * We free an element into the freelist containing similarly-sized elements.
 * We try to allocate elements starting with the freelist containing
 * similarly-sized elements, and if necessary, we search freelists
 * containing larger elements.
 *
 * Example element size ranges for a heap with five free lists:
 *   heap->free_head[0] - (0   , 2^8]
 *   heap->free_head[1] - (2^8 , 2^10]
 *   heap->free_head[2] - (2^10, 2^12]
 *   heap->free_head[3] - (2^12, 2^14]
 *   heap->free_head[4] - (2^14, MAX_SIZE]
 */
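/*
 * Worked example for the five-list layout above: size = 3000 rounds up to
 * 2^12, so log2 = 12 and index = (12 - 8 + 2 - 1) / 2 = 2, i.e.
 * free_head[2], whose range (2^10, 2^12] indeed contains 3000.
 */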
size_t
malloc_elem_free_list_index(size_t size)
{
#define MALLOC_MINSIZE_LOG2   8
#define MALLOC_LOG2_INCREMENT 2

	size_t log2;
	size_t index;

	if (size <= (1UL << MALLOC_MINSIZE_LOG2))
		return 0;

	/* Find next power of 2 >= size. */
	log2 = sizeof(size) * 8 - __builtin_clzl(size - 1);

	/* Compute freelist index, based on log2(size). */
	index = (log2 - MALLOC_MINSIZE_LOG2 + MALLOC_LOG2_INCREMENT - 1) /
			MALLOC_LOG2_INCREMENT;

	return (index <= RTE_HEAP_NUM_FREELISTS - 1 ?
			index : RTE_HEAP_NUM_FREELISTS - 1);
}
/*
 * Add the specified element to its heap's free list.
 */
void
malloc_elem_free_list_insert(struct malloc_elem *elem)
{
	size_t idx = malloc_elem_free_list_index(elem->size -
			MALLOC_ELEM_HEADER_LEN);

	elem->state = ELEM_FREE;
	LIST_INSERT_HEAD(&elem->heap->free_head[idx], elem, free_list);
}
/*
 * Remove the specified element from its heap's free list.
 */
static void
elem_free_list_remove(struct malloc_elem *elem)
{
	LIST_REMOVE(elem, free_list);
}
/*
 * reserve a block of data in an existing malloc_elem. If the malloc_elem
 * is much larger than the data block requested, we split the element in two.
 * This function is only called from malloc_heap_alloc, so parameter checking
 * is not done here, as it's already done there.
 */
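/*
 * Two outcomes are possible: if the slack left at the front of the element
 * would be too small to be useful, the whole element is handed out and the
 * slack is recorded as header padding; otherwise the element is split and
 * only the tail portion is allocated.
 */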
struct malloc_elem *
malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align)
{
	struct malloc_elem *new_elem = elem_start_pt(elem, size, align);
	const unsigned old_elem_size = (uintptr_t)new_elem - (uintptr_t)elem;

	if (old_elem_size < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
		/* don't split it, pad the element instead */
		elem->state = ELEM_BUSY;
		elem->pad = old_elem_size;

		/* put a dummy header in padding, to point to real element header */
		if (elem->pad > 0) { /* pad will be at least 64 bytes, as everything
		                      * is cache-line aligned */
			new_elem->pad = elem->pad;
			new_elem->state = ELEM_PAD;
			new_elem->size = elem->size - elem->pad;
			set_header(new_elem);
		}
		/* remove element from free list */
		elem_free_list_remove(elem);

		return new_elem;
	}

	/* we are going to split the element in two. The original element
	 * remains free, and the new element is the one allocated.
	 * Re-insert the original element, in case its new size makes it
	 * belong on a different list.
	 */
	elem_free_list_remove(elem);
	split_elem(elem, new_elem);
	new_elem->state = ELEM_BUSY;
	malloc_elem_free_list_insert(elem);

	return new_elem;
}
/*
 * join two struct malloc_elems together. elem1 and elem2 must
 * be contiguous in memory.
 */
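/*
 * For example, joining [elem1][elem2][next] yields [elem1........][next],
 * with next->prev re-pointed at the enlarged elem1.
 */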
static inline void
join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2)
{
	struct malloc_elem *next = RTE_PTR_ADD(elem2, elem2->size);
	elem1->size += elem2->size;
	next->prev = elem1;
}
/*
 * free a malloc_elem block by adding it to the free list. If the
 * blocks either immediately before or immediately after the newly freed
 * block are also free, the blocks are merged together.
 */
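/*
 * Note the merge order: the following element is absorbed first, so when
 * both neighbours are free the second join folds everything into a single
 * element headed by elem->prev.
 */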
int
malloc_elem_free(struct malloc_elem *elem)
{
	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
		return -1;

	rte_spinlock_lock(&(elem->heap->lock));
	struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
	if (next->state == ELEM_FREE) {
		/* remove from free list, join to this one */
		elem_free_list_remove(next);
		join_elem(elem, next);
	}

	/* check if previous element is free, if so join with it and return,
	 * need to re-insert in free list, as that element's size is changing
	 */
	if (elem->prev != NULL && elem->prev->state == ELEM_FREE) {
		elem_free_list_remove(elem->prev);
		join_elem(elem->prev, elem);
		malloc_elem_free_list_insert(elem->prev);
	}
	/* otherwise add ourselves to the free list */
	else
		malloc_elem_free_list_insert(elem);

	/* decrease heap's count of allocated elements */
	elem->heap->alloc_count--;
	rte_spinlock_unlock(&(elem->heap->lock));

	return 0;
}
/*
 * attempt to resize a malloc_elem by expanding into any free space
 * immediately after it in memory.
 */
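/*
 * This only ever grows the element in place; the data is never moved, so
 * if the following element is busy or too small the call fails and the
 * caller must fall back to a fresh allocation.
 */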
int
malloc_elem_resize(struct malloc_elem *elem, size_t size)
{
	const size_t new_size = size + MALLOC_ELEM_OVERHEAD;
	/* if we request a smaller size, then always return ok */
	const size_t current_size = elem->size - elem->pad;
	if (current_size >= new_size)
		return 0;

	struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
	rte_spinlock_lock(&elem->heap->lock);
	if (next->state != ELEM_FREE)
		goto err_return;
	if (current_size + next->size < new_size)
		goto err_return;

	/* we now know the element fits, so remove from free list,
	 * join the two
	 */
	elem_free_list_remove(next);
	join_elem(elem, next);

	if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD) {
		/* now we have a big block together. Let's cut it down a bit,
		 * by splitting */
		struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
		split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
		split_elem(elem, split_pt);
		malloc_elem_free_list_insert(split_pt);
	}
	rte_spinlock_unlock(&elem->heap->lock);
	return 0;

err_return:
	rte_spinlock_unlock(&elem->heap->lock);
	return -1;
}