msl->page_sz = page_sz;
msl->socket_id = socket_id;
msl->base_va = NULL;
+ msl->heap = 1; /* mark it as a heap segment */
RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
(size_t)page_sz >> 10, socket_id);
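For context, the new flag sits alongside external in struct rte_memseg_list (abridged sketch below; the full definition lives in rte_eal_memconfig.h). Together the two flags let a walk callback tell apart the three kinds of memory; classify_list() here is purely illustrative:

/* abridged sketch, not the full struct definition */
struct rte_memseg_list {
	/* ... base VA, page size, socket, length, fbarray ... */
	unsigned int external; /* 1 if populated by the user rather than EAL */
	unsigned int heap;     /* 1 if the list backs a malloc heap */
};

static int
classify_list(const struct rte_memseg_list *msl)
{
	if (!msl->external)
		return 0; /* internal, EAL-managed memory */
	if (msl->heap)
		return 1; /* external memory attached to a malloc heap */
	return 2;         /* externally registered, user-managed memory */
}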
msl->page_sz = page_sz;
msl->socket_id = 0;
msl->len = internal_config.memory;
+ msl->heap = 1;
/* we're in single-file segments mode, so only the segment list
 * fd needs to be set up.
 */
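As an aside, a standalone sketch of what single-file segments mode means (generic POSIX illustration, not the EAL's actual fd bookkeeping): one fd backs the entire segment list, and each segment maps that fd at its own offset, so only the list-level fd needs tracking:

#define _GNU_SOURCE
#include <sys/mman.h>

/* map segment seg_idx out of the single file backing the whole list */
static void *
map_segment(int list_fd, size_t seg_idx, size_t page_sz)
{
	return mmap(NULL, page_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, list_fd,
			(off_t)(seg_idx * page_sz));
}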
mem_sz = msl->len;
munmap(msl->base_va, mem_sz);
msl->base_va = NULL;
+ msl->heap = 0;
/* destroy backing fbarray */
rte_fbarray_destroy(&msl->memseg_arr);
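The rte_fbarray_destroy() call here is the counterpart of the rte_fbarray_init() done at list allocation time; a minimal sketch of that pairing (the array name and element count below are illustrative):

#include <rte_fbarray.h>
#include <rte_memory.h>

static int
make_seg_tracker(struct rte_fbarray *arr, unsigned int n_segs)
{
	/* one rte_memseg-sized entry per segment in the list */
	return rte_fbarray_init(arr, "example_memseg_arr", n_segs,
			sizeof(struct rte_memseg));
}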
{
int *vfio_container_fd = arg;
- if (msl->external)
+ /* skip external memory that isn't a heap */
+ if (msl->external && !msl->heap)
+ return 0;
+
+ /* skip any segments with invalid IOVA addresses */
+ if (ms->iova == RTE_BAD_IOVA)
+ return 0;
+
+ /* if IOVA mode is VA, we've already mapped the internal segments */
+ if (!msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
return 0;
return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
		ms->len, 1);
/* with IOVA as VA mode, we can get away with mapping contiguous
* chunks rather than going page-by-page.
*/
- return rte_memseg_contig_walk(type1_map_contig,
+ int ret = rte_memseg_contig_walk(type1_map_contig,
&vfio_container_fd);
+ if (ret)
+ return ret;
+ /* we have to continue the walk because we've skipped the
+ * external segments during the config walk.
+ */
}
return rte_memseg_walk(type1_map, &vfio_container_fd);
}
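The practical effect of the heap check: memory added to an external malloc heap is picked up by these walks and DMA-mapped automatically, while memory that is merely registered remains the user's responsibility to map (via rte_dev_dma_map()). A hedged usage sketch, assuming 18.11-era APIs and a caller-filled IOVA table (e.g. via rte_mem_virt2iova() per page):

#include <rte_malloc.h>
#include <rte_memory.h>

/* attach externally allocated memory to a named heap */
static int
attach_ext_heap(void *va, size_t len, rte_iova_t iova[], unsigned int n_pages,
		size_t page_sz)
{
	if (rte_malloc_heap_create("ext_heap") != 0)
		return -1;
	/* this lands in a memseg list with external = 1 and heap = 1,
	 * so the VFIO walks above will map it */
	return rte_malloc_heap_memory_add("ext_heap", va, len, iova,
			n_pages, page_sz);
}

By contrast, rte_extmem_register() produces external = 1, heap = 0 lists, which is exactly what the new skip condition filters out.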
{
struct spapr_remap_walk_param *param = arg;
- if (msl->external || ms->addr_64 == param->addr_64)
+ /* skip external memory that isn't a heap */
+ if (msl->external && !msl->heap)
+ return 0;
+
+ /* skip any segments with invalid IOVA addresses */
+ if (ms->iova == RTE_BAD_IOVA)
+ return 0;
+
+ if (ms->addr_64 == param->addr_64)
return 0;
return vfio_spapr_dma_do_map(param->vfio_container_fd, ms->addr_64, ms->iova,
		ms->len, 1);
{
struct spapr_remap_walk_param *param = arg;
- if (msl->external || ms->addr_64 == param->addr_64)
+ /* skip external memory that isn't a heap */
+ if (msl->external && !msl->heap)
+ return 0;
+
+ /* skip any segments with invalid IOVA addresses */
+ if (ms->iova == RTE_BAD_IOVA)
+ return 0;
+
+ if (ms->addr_64 == param->addr_64)
return 0;
return vfio_spapr_dma_do_map(param->vfio_container_fd, ms->addr_64, ms->iova,
		ms->len, 0);
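Both callbacks take the same parameter block, so a remap driver can run them back to back; a sketch of that flow (the callback and struct names are assumed, since the excerpt shows only the callback bodies):

#include <rte_memory.h>

struct spapr_remap_walk_param {
	int vfio_container_fd;
	uint64_t addr_64; /* the segment being added; both walks skip it */
};

static int
spapr_remap_all(int container_fd, uint64_t new_seg_addr)
{
	struct spapr_remap_walk_param param = {
		.vfio_container_fd = container_fd,
		.addr_64 = new_seg_addr,
	};
	int ret;

	/* unmap everything else, resize the DMA window (omitted here),
	 * then map everything else back in */
	ret = rte_memseg_walk(vfio_spapr_unmap_walk, &param);
	if (ret < 0)
		return ret;
	return rte_memseg_walk(vfio_spapr_map_walk, &param);
}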
struct spapr_walk_param *param = arg;
uint64_t max = ms->iova + ms->len;
- if (msl->external)
+ /* skip external memory that isn't a heap */
+ if (msl->external && !msl->heap)
+ return 0;
+
+ /* skip any segments with invalid IOVA addresses */
+ if (ms->iova == RTE_BAD_IOVA)
return 0;
/* do not iterate ms we haven't mapped yet */
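The max value computed here feeds the sPAPR DMA window size, which must be a power of two; roughly, assuming the usual spapr_walk_param layout:

#include <rte_common.h> /* rte_align64pow2() */

struct spapr_walk_param {
	uint64_t window_size; /* running maximum of ms->iova + ms->len */
	uint64_t hugepage_sz;
};

/* the TCE window must cover the highest mapped IOVA, rounded up */
static uint64_t
spapr_window_size(const struct spapr_walk_param *param)
{
	return rte_align64pow2(param->window_size);
}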