+int
+rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_memseg_list *msl;
+ struct rte_fbarray *arr;
+ int msl_idx, seg_idx, ret;
+
+ if (ms == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ msl = rte_mem_virt2memseg_list(ms->addr);
+ if (msl == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ arr = &msl->memseg_arr;
+
+ msl_idx = msl - mcfg->memsegs;
+ seg_idx = rte_fbarray_find_idx(arr, ms);
+
+ if (!rte_fbarray_is_used(arr, seg_idx)) {
+ rte_errno = ENOENT;
+ return -1;
+ }
+
+ /* segment fd API is not supported for external segments */
+ if (msl->external) {
+ rte_errno = ENOTSUP;
+ return -1;
+ }
+
+ ret = eal_memalloc_get_seg_fd(msl_idx, seg_idx);
+ if (ret < 0) {
+ rte_errno = -ret;
+ ret = -1;
+ }
+ return ret;
+}
+
/*
 * Thread-safe variant: take the shared memory read lock around the
 * unsafe fd lookup.
 */
int
rte_memseg_get_fd(const struct rte_memseg *ms)
{
	int fd;

	rte_mcfg_mem_read_lock();
	fd = rte_memseg_get_fd_thread_unsafe(ms);
	rte_mcfg_mem_read_unlock();

	return fd;
}
+
+int
+rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
+ size_t *offset)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_memseg_list *msl;
+ struct rte_fbarray *arr;
+ int msl_idx, seg_idx, ret;
+
+ if (ms == NULL || offset == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ msl = rte_mem_virt2memseg_list(ms->addr);
+ if (msl == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ arr = &msl->memseg_arr;
+
+ msl_idx = msl - mcfg->memsegs;
+ seg_idx = rte_fbarray_find_idx(arr, ms);
+
+ if (!rte_fbarray_is_used(arr, seg_idx)) {
+ rte_errno = ENOENT;
+ return -1;
+ }
+
+ /* segment fd API is not supported for external segments */
+ if (msl->external) {
+ rte_errno = ENOTSUP;
+ return -1;
+ }
+
+ ret = eal_memalloc_get_seg_fd_offset(msl_idx, seg_idx, offset);
+ if (ret < 0) {
+ rte_errno = -ret;
+ ret = -1;
+ }
+ return ret;
+}
+
/*
 * Thread-safe variant: take the shared memory read lock around the
 * unsafe offset lookup.
 */
int
rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
{
	int rc;

	rte_mcfg_mem_read_lock();
	rc = rte_memseg_get_fd_offset_thread_unsafe(ms, offset);
	rte_mcfg_mem_read_unlock();

	return rc;
}
+
+int
+rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
+ unsigned int n_pages, size_t page_sz)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ unsigned int socket_id, n;
+ int ret = 0;
+
+ if (va_addr == NULL || page_sz == 0 || len == 0 ||
+ !rte_is_power_of_2(page_sz) ||
+ RTE_ALIGN(len, page_sz) != len ||
+ ((len / page_sz) != n_pages && iova_addrs != NULL) ||
+ !rte_is_aligned(va_addr, page_sz)) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ rte_mcfg_mem_write_lock();
+
+ /* make sure the segment doesn't already exist */
+ if (malloc_heap_find_external_seg(va_addr, len) != NULL) {
+ rte_errno = EEXIST;
+ ret = -1;
+ goto unlock;
+ }
+
+ /* get next available socket ID */
+ socket_id = mcfg->next_socket_id;
+ if (socket_id > INT32_MAX) {
+ RTE_LOG(ERR, EAL, "Cannot assign new socket ID's\n");
+ rte_errno = ENOSPC;
+ ret = -1;
+ goto unlock;
+ }
+
+ /* we can create a new memseg */
+ n = len / page_sz;
+ if (malloc_heap_create_external_seg(va_addr, iova_addrs, n,
+ page_sz, "extmem", socket_id) == NULL) {
+ ret = -1;
+ goto unlock;
+ }
+
+ /* memseg list successfully created - increment next socket ID */
+ mcfg->next_socket_id++;
+unlock:
+ rte_mcfg_mem_write_unlock();
+ return ret;
+}
+
+int
+rte_extmem_unregister(void *va_addr, size_t len)
+{
+ struct rte_memseg_list *msl;
+ int ret = 0;
+
+ if (va_addr == NULL || len == 0) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ rte_mcfg_mem_write_lock();
+
+ /* find our segment */
+ msl = malloc_heap_find_external_seg(va_addr, len);
+ if (msl == NULL) {
+ rte_errno = ENOENT;
+ ret = -1;
+ goto unlock;
+ }
+
+ ret = malloc_heap_destroy_external_seg(msl);
+unlock:
+ rte_mcfg_mem_write_unlock();
+ return ret;
+}
+
+static int
+sync_memory(void *va_addr, size_t len, bool attach)
+{
+ struct rte_memseg_list *msl;
+ int ret = 0;
+
+ if (va_addr == NULL || len == 0) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ rte_mcfg_mem_write_lock();
+
+ /* find our segment */
+ msl = malloc_heap_find_external_seg(va_addr, len);
+ if (msl == NULL) {
+ rte_errno = ENOENT;
+ ret = -1;
+ goto unlock;
+ }
+ if (attach)
+ ret = rte_fbarray_attach(&msl->memseg_arr);
+ else
+ ret = rte_fbarray_detach(&msl->memseg_arr);
+
+unlock:
+ rte_mcfg_mem_write_unlock();
+ return ret;
+}
+
+int
+rte_extmem_attach(void *va_addr, size_t len)
+{
+ return sync_memory(va_addr, len, true);
+}
+
+int
+rte_extmem_detach(void *va_addr, size_t len)
+{
+ return sync_memory(va_addr, len, false);
+}
+
/* detach all EAL memory */
/*
 * Tear down this process's view of EAL memory: unmap every internal memseg
 * list, detach (not destroy) all fbarray metadata, then unmap the shared
 * mem_config itself. Errors along the way are logged but not propagated;
 * the function always returns 0. The statement order is load-bearing — the
 * hotplug lock lives inside mcfg, so it must be released before mcfg is
 * unmapped at the end.
 */
int
rte_eal_memory_detach(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	size_t page_sz = rte_mem_page_size();
	unsigned int i;

	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);

	/* detach internal memory subsystem data first */
	if (eal_memalloc_cleanup())
		RTE_LOG(ERR, EAL, "Could not release memory subsystem data\n");

	for (i = 0; i < RTE_DIM(mcfg->memsegs); i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		/* skip uninitialized segments */
		if (msl->base_va == NULL)
			continue;
		/*
		 * external segments are supposed to be detached at this point,
		 * but if they aren't, we can't really do anything about it,
		 * because if we skip them here, they'll become invalid after
		 * we unmap the memconfig anyway. however, if this is externally
		 * referenced memory, we have no business unmapping it.
		 */
		if (!msl->external)
			if (rte_mem_unmap(msl->base_va, msl->len) != 0)
				RTE_LOG(ERR, EAL, "Could not unmap memory: %s\n",
						strerror(errno));

		/*
		 * we are detaching the fbarray rather than destroying because
		 * other processes might still reference this fbarray, and we
		 * have no way of knowing if they still do.
		 */
		if (rte_fbarray_detach(&msl->memseg_arr))
			RTE_LOG(ERR, EAL, "Could not detach fbarray: %s\n",
					rte_strerror(rte_errno));
	}
	/* release the lock BEFORE unmapping mcfg — the lock is stored in it */
	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);

	/*
	 * we've detached the memseg lists, so we can unmap the shared mem
	 * config - we can't zero it out because it might still be referenced
	 * by other processes.
	 */
	rte_mem_unmap(mcfg, RTE_ALIGN(sizeof(*mcfg), page_sz));
	/* drop the now-dangling pointer so later accesses fault loudly */
	rte_eal_get_configuration()->mem_config = NULL;

	return 0;
}
+