void *map_addr;
size_t alloc_sz;
int n_segs;
+ bool callback_triggered = false;
alloc_sz = RTE_ALIGN_CEIL(align + elt_size +
MALLOC_ELEM_TRAILER_LEN, pg_sz);
map_addr = ms[0]->addr;
+ /* notify user about changes in memory map */
+ eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC, map_addr, alloc_sz);
+
/* notify other processes that this has happened */
if (request_sync()) {
/* we couldn't ensure all processes have mapped memory,
* so free it back and notify everyone that it's been
* freed back.
+ *
+ * technically, we could've avoided adding these addresses to
+ * the memory map, but that would've led to inconsistent
+ * behavior between primary and secondary processes, as
+ * secondaries get their callbacks during sync. therefore, force
+ * the primary process to do alloc-and-rollback syncs as well.
*/
+ callback_triggered = true;
goto free_elem;
}
heap->total_size += alloc_sz;
return 0;
free_elem:
+ if (callback_triggered)
+ eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
+ map_addr, alloc_sz);
+
rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);
request_sync();
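
For reference, the notifications emitted above are consumed through DPDK's memory event callback API (rte_mem_event_callback_register() in rte_memory.h). The sketch below is illustrative only and not part of the patch; it assumes the 18.05-era signatures from this series, where the callback receives (event, addr, len) and registration takes a name plus the callback (later releases also pass a user-supplied argument).

#include <rte_memory.h>
#include <rte_log.h>

/* example consumer: log every change to the DPDK memory map */
static void
mem_event_cb(enum rte_mem_event event, const void *addr, size_t len)
{
	/* callbacks run while internal memory locks are held, so do not
	 * allocate or free DPDK memory from here
	 */
	if (event == RTE_MEM_EVENT_ALLOC)
		RTE_LOG(DEBUG, USER1, "mem added: %p, %zu bytes\n", addr, len);
	else
		RTE_LOG(DEBUG, USER1, "mem freed: %p, %zu bytes\n", addr, len);
}

static int
register_mem_event_cb(void)
{
	/* the name only identifies the callback for later unregistering */
	return rte_mem_event_callback_register("example-mem-cb", mem_event_cb);
}
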
heap->total_size -= aligned_len;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* notify user about changes in memory map */
+ eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
+ aligned_start, aligned_len);
+
/* don't care if any of this fails */
malloc_heap_free_pages(aligned_start, aligned_len);
* already removed from the heap, so it is, for all intents and
* purposes, hidden from the rest of DPDK even if some other
* process (including this one) may have these pages mapped.
+ *
+ * notifications about deallocated memory happen during sync.
*/
request_to_primary(&req);
}
diff_len = RTE_MIN(chunk_len, diff_len);
+ /* if we are freeing memory, notify the application */
+ if (!used) {
+ struct rte_memseg *ms;
+ void *start_va;
+ size_t len, page_sz;
+
+ ms = rte_fbarray_get(l_arr, start);
+ start_va = ms->addr;
+ page_sz = (size_t)primary_msl->page_sz;
+ len = page_sz * diff_len;
+
+ eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
+ start_va, len);
+ }
+
for (i = 0; i < diff_len; i++) {
struct rte_memseg *p_ms, *l_ms;
int seg_idx = start + i;
}
}
+ /* if we just allocated memory, notify the application */
+ if (used) {
+ struct rte_memseg *ms;
+ void *start_va;
+ size_t len, page_sz;
+
+ ms = rte_fbarray_get(l_arr, start);
+ start_va = ms->addr;
+ page_sz = (size_t)primary_msl->page_sz;
+ len = page_sz * diff_len;
+
+ eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
+ start_va, len);
+ }
+
/* calculate how much we can advance until next chunk */
diff_len = used ?
rte_fbarray_find_contig_used(l_arr, start) :
create.levels = 1;
if (do_map) {
+ void *addr;
/* re-create window and remap the entire memory */
if (iova > create.window_size) {
if (vfio_spapr_create_new_dma_window(vfio_container_fd,
/* now that we've remapped all of the memory that was present
* before, map the segment that we were requested to map.
+ *
+ * however, if we were called from the memory event callback, the
+ * memory we were asked to map is already in the memseg list, so
+ * the remap above should have covered that segment already.
+ *
+ * rte_mem_virt2memseg_list() is a relatively cheap check, so use
+ * that: if the address falls within any memseg list, it is a
+ * memseg and is therefore already mapped.
*/
- if (vfio_spapr_dma_do_map(vfio_container_fd,
- vaddr, iova, len, 1) < 0) {
+ addr = (void *)(uintptr_t)vaddr;
+ if (rte_mem_virt2memseg_list(addr) == NULL &&
+ vfio_spapr_dma_do_map(vfio_container_fd,
+ vaddr, iova, len, 1) < 0) {
RTE_LOG(ERR, EAL, "Could not map segment\n");
ret = -1;
goto out;
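
The comment above leans on rte_mem_virt2memseg_list() being a cheap membership test: it returns the memseg list containing a virtual address, or NULL if the address is not DPDK-managed memory. As an illustrative aside (not part of the patch), the same check can be written in isolation as:

#include <stdbool.h>
#include <rte_memory.h>

/* true if addr lies inside DPDK-managed memory, i.e. some memseg list
 * contains it
 */
static bool
addr_is_dpdk_memseg(const void *addr)
{
	return rte_mem_virt2memseg_list(addr) != NULL;
}
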