return;
}
+#ifdef RTE_ARCH_PPC_64
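+ /*
+ * Temporarily mark these segments free: memseg walks only visit
+ * segments flagged as used, so the map/unmap walks that run while
+ * the sPAPR DMA window is recreated skip the range handled here
+ * (this replaces the removed addr_64 skip checks below).
+ */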
+ ms = rte_mem_virt2memseg(addr, msl);
+ while (cur_len < len) {
+ int idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+
+ rte_fbarray_set_free(&msl->memseg_arr, idx);
+ cur_len += ms->len;
+ ++ms;
+ }
+ cur_len = 0;
+#endif
/* memsegs are contiguous in memory */
ms = rte_mem_virt2memseg(addr, msl);
while (cur_len < len) {
cur_len += ms->len;
++ms;
}
+#ifdef RTE_ARCH_PPC_64
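+ /* the range is now mapped or unmapped; flag the segments used again */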
+ cur_len = 0;
+ ms = rte_mem_virt2memseg(addr, msl);
+ while (cur_len < len) {
+ int idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+
+ rte_fbarray_set_used(&msl->memseg_arr, idx);
+ cur_len += ms->len;
+ ++ms;
+ }
+#endif
}
static int
return 0;
}
-struct spapr_remap_walk_param {
- int vfio_container_fd;
- uint64_t addr_64;
-};
-
static int
vfio_spapr_map_walk(const struct rte_memseg_list *msl,
const struct rte_memseg *ms, void *arg)
{
- struct spapr_remap_walk_param *param = arg;
+ int *vfio_container_fd = arg;
/* skip external memory that isn't a heap */
if (msl->external && !msl->heap)
return 0;
if (ms->iova == RTE_BAD_IOVA)
return 0;
- if (ms->addr_64 == param->addr_64)
- return 0;
-
- return vfio_spapr_dma_do_map(param->vfio_container_fd, ms->addr_64, ms->iova,
+ return vfio_spapr_dma_do_map(*vfio_container_fd, ms->addr_64, ms->iova,
ms->len, 1);
}
vfio_spapr_unmap_walk(const struct rte_memseg_list *msl,
const struct rte_memseg *ms, void *arg)
{
- struct spapr_remap_walk_param *param = arg;
+ int *vfio_container_fd = arg;
/* skip external memory that isn't a heap */
if (msl->external && !msl->heap)
return 0;
if (ms->iova == RTE_BAD_IOVA)
return 0;
- if (ms->addr_64 == param->addr_64)
- return 0;
-
- return vfio_spapr_dma_do_map(param->vfio_container_fd, ms->addr_64, ms->iova,
+ return vfio_spapr_dma_do_map(*vfio_container_fd, ms->addr_64, ms->iova,
ms->len, 0);
}
struct spapr_walk_param {
uint64_t window_size;
uint64_t hugepage_sz;
- uint64_t addr_64;
};
static int
if (ms->iova == RTE_BAD_IOVA)
return 0;
- /* do not iterate ms we haven't mapped yet */
- if (param->addr_64 && ms->addr_64 == param->addr_64)
- return 0;
-
if (max > param->window_size) {
param->hugepage_sz = ms->hugepage_sz;
param->window_size = max;
-/* try possible page_shift and levels for workaround */
+/* as a workaround, retry window creation with more TCE levels */
uint32_t levels;
- for (levels = 1; levels <= info.ddw.levels; levels++) {
- uint32_t pgsizes = info.ddw.pgsizes;
-
- while (pgsizes != 0) {
- create->page_shift = 31 - __builtin_clz(pgsizes);
- create->levels = levels;
- ret = ioctl(vfio_container_fd,
- VFIO_IOMMU_SPAPR_TCE_CREATE, create);
- if (!ret)
- break;
- pgsizes &= ~(1 << create->page_shift);
- }
- if (!ret)
- break;
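+ /*
+ * Retry only with a growing number of TCE levels; page_shift is
+ * left as already set, and the loop exits once the create ioctl
+ * succeeds (ret == 0) or the supported levels are exhausted.
+ */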
+ for (levels = create->levels + 1;
+ ret && levels <= info.ddw.levels; levels++) {
+ create->levels = levels;
+ ret = ioctl(vfio_container_fd,
+ VFIO_IOMMU_SPAPR_TCE_CREATE, create);
}
#endif
if (ret) {
/* check if window size needs to be adjusted */
memset(&param, 0, sizeof(param));
- param.addr_64 = vaddr;
/* we're inside a callback so use thread-unsafe version */
if (rte_memseg_walk_thread_unsafe(vfio_spapr_window_size_walk,
if (do_map) {
/* re-create window and remap the entire memory */
if (iova + len > create.window_size) {
- struct spapr_remap_walk_param remap_param = {
- .vfio_container_fd = vfio_container_fd,
- .addr_64 = vaddr,
- };
-
/* release all maps before recreating the window */
if (rte_memseg_walk_thread_unsafe(vfio_spapr_unmap_walk,
- &remap_param) < 0) {
+ &vfio_container_fd) < 0) {
RTE_LOG(ERR, EAL, "Could not release DMA maps\n");
ret = -1;
goto out;
/* we're inside a callback, so use thread-unsafe version
*/
if (rte_memseg_walk_thread_unsafe(vfio_spapr_map_walk,
- &remap_param) < 0) {
+ &vfio_container_fd) < 0) {
RTE_LOG(ERR, EAL, "Could not recreate DMA maps\n");
ret = -1;
goto out;
struct spapr_walk_param param;
memset(&param, 0, sizeof(param));
- param.addr_64 = 0UL;
/* create DMA window from 0 to max(phys_addr + len) */
rte_memseg_walk(vfio_spapr_window_size_walk, &param);