X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Flinuxapp%2Feal%2Feal_memalloc.c;h=1f553dd2a6c7c81ac0bb81005600ac0c944d3e48;hb=b4953225cea42bb4da370e012a6be41c54c80967;hp=31fb55e948cc5626610fb2afd8c1c3a28307f2d7;hpb=524e43c2ad9a347ed3ea8b1d23497a01eb9e47cf;p=dpdk.git

diff --git a/lib/librte_eal/linuxapp/eal/eal_memalloc.c b/lib/librte_eal/linuxapp/eal/eal_memalloc.c
index 31fb55e948..1f553dd2a6 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memalloc.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memalloc.c
@@ -212,6 +212,32 @@ is_zero_length(int fd)
 	return st.st_blocks == 0;
 }
 
+/* we cannot use rte_memseg_list_walk() here because we will be holding a
+ * write lock whenever we enter any function in this file, and duplicating
+ * the same iteration code everywhere is not ideal either, so use a lockless
+ * copy of the memseg list walk here.
+ */
+static int
+memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	int i, ret = 0;
+
+	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
+		struct rte_memseg_list *msl = &mcfg->memsegs[i];
+
+		if (msl->base_va == NULL)
+			continue;
+
+		ret = func(msl, arg);
+		if (ret < 0)
+			return -1;
+		if (ret > 0)
+			return 1;
+	}
+	return 0;
+}
+
 static int
 get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
 		unsigned int list_idx, unsigned int seg_idx)
@@ -740,7 +766,7 @@ eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
 	wa.socket = socket;
 	wa.segs_allocated = 0;
 
-	ret = rte_memseg_list_walk(alloc_seg_walk, &wa);
+	ret = memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
 	if (ret == 0) {
 		RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
 			__func__);
@@ -781,6 +807,13 @@ eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
 		struct free_walk_param wa;
 		int i, walk_res;
 
+		/* if this page is marked as unfreeable, fail */
+		if (cur->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
+			RTE_LOG(DEBUG, EAL, "Page is not allowed to be freed\n");
+			ret = -1;
+			continue;
+		}
+
 		memset(&wa, 0, sizeof(wa));
 
 		for (i = 0; i < (int)RTE_DIM(internal_config.hugepage_info);
@@ -798,7 +831,7 @@ eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
 		wa.ms = cur;
 		wa.hi = hi;
 
-		walk_res = rte_memseg_list_walk(free_seg_walk, &wa);
+		walk_res = memseg_list_walk_thread_unsafe(free_seg_walk, &wa);
 		if (walk_res == 1)
 			continue;
 		if (walk_res == 0)
@@ -851,6 +884,21 @@ sync_chunk(struct rte_memseg_list *primary_msl,
 
 		diff_len = RTE_MIN(chunk_len, diff_len);
 
+		/* if we are freeing memory, notify the application */
+		if (!used) {
+			struct rte_memseg *ms;
+			void *start_va;
+			size_t len, page_sz;
+
+			ms = rte_fbarray_get(l_arr, start);
+			start_va = ms->addr;
+			page_sz = (size_t)primary_msl->page_sz;
+			len = page_sz * diff_len;
+
+			eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
+					start_va, len);
+		}
+
 		for (i = 0; i < diff_len; i++) {
 			struct rte_memseg *p_ms, *l_ms;
 			int seg_idx = start + i;
@@ -876,6 +924,21 @@ sync_chunk(struct rte_memseg_list *primary_msl,
 			}
 		}
 
+		/* if we just allocated memory, notify the application */
+		if (used) {
+			struct rte_memseg *ms;
+			void *start_va;
+			size_t len, page_sz;
+
+			ms = rte_fbarray_get(l_arr, start);
+			start_va = ms->addr;
+			page_sz = (size_t)primary_msl->page_sz;
+			len = page_sz * diff_len;
+
+			eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
+					start_va, len);
+		}
+
 		/* calculate how much we can advance until next chunk */
 		diff_len = used ?
 				rte_fbarray_find_contig_used(l_arr, start) :
@@ -1055,7 +1118,7 @@ eal_memalloc_sync_with_primary(void)
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 		return 0;
 
-	if (rte_memseg_list_walk(sync_walk, NULL))
+	if (memseg_list_walk_thread_unsafe(sync_walk, NULL))
 		return -1;
 	return 0;
 }
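
Note on the callback contract: the walkers passed to the new memseg_list_walk_thread_unsafe() (alloc_seg_walk, free_seg_walk, sync_walk) follow the rte_memseg_list_walk_t convention encoded in the loop above: a negative return aborts the walk (it returns -1), a positive return stops it early (it returns 1), and zero continues to the next populated list. A minimal sketch of a conforming callback follows; the callback name and its page-size argument are hypothetical, made up for illustration and not part of the patch:

#include <stdint.h>
#include <rte_memory.h>

/* hypothetical callback: stop at the first memseg list whose page size
 * matches the value pointed to by arg. Returning 1 makes the walk stop
 * and report 1; returning 0 continues; a negative return would abort
 * the walk with -1. */
static int
find_page_sz_cb(const struct rte_memseg_list *msl, void *arg)
{
	uint64_t want = *(uint64_t *)arg;	/* hypothetical target page size */

	if (msl->page_sz == want)
		return 1;	/* found: stop the walk early */
	return 0;	/* not this list: keep iterating */
}

Application code outside the EAL would pass such a callback to the public, locking rte_memseg_list_walk(); per the comment in the patch, every function in eal_memalloc.c already runs under the memory hotplug write lock, which is why this file switches to the thread-unsafe copy while keeping the same callback contract.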