#include <rte_common.h>
#include <rte_errno.h>
#include <rte_vfio.h>
+#include <rte_vhost.h>
#include "efx.h"
#include "sfc_vdpa.h"
{
uint64_t mcdi_iova;
size_t mcdi_buff_size;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz = NULL;
int numa_node = sva->pdev->device.numa_node;
int ret;
mcdi_buff_size = RTE_ALIGN_CEIL(len, PAGE_SIZE);
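+ /* Compose a memzone name unique to this device: "<PCI name>_<name>" */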
+ ret = snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%s",
+ sva->pdev->name, name);
+ if (ret < 0 || ret >= RTE_MEMZONE_NAMESIZE) {
+ sfc_vdpa_err(sva, "%s_%s too long to fit in mz_name",
+ sva->pdev->name, name);
+ return -EINVAL;
+ }
- sfc_vdpa_log_init(sva, "name=%s, len=%zu", name, len);
+ sfc_vdpa_log_init(sva, "name=%s, len=%zu", mz_name, len);
- mz = rte_memzone_reserve_aligned(name, mcdi_buff_size,
+ mz = rte_memzone_reserve_aligned(mz_name, mcdi_buff_size,
numa_node,
RTE_MEMZONE_IOVA_CONTIG,
PAGE_SIZE);
if (mz == NULL) {
sfc_vdpa_err(sva, "cannot reserve memory for %s: len=%#x: %s",
- name, (unsigned int)len, rte_strerror(rte_errno));
+ mz_name, (unsigned int)len,
+ rte_strerror(rte_errno));
return -ENOMEM;
}
memset(esmp, 0, sizeof(*esmp));
}
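+/*
+ * Map or unmap the guest memory regions reported by vhost into the
+ * device's VFIO container, using each region's guest physical address
+ * as the IOVA so the device can DMA directly to/from guest buffers.
+ */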
+int
+sfc_vdpa_dma_map(struct sfc_vdpa_ops_data *ops_data, bool do_map)
+{
+ uint32_t i, j;
+ int rc;
+ struct rte_vhost_memory *vhost_mem = NULL;
+ struct rte_vhost_mem_region *mem_reg = NULL;
+ int vfio_container_fd;
+ void *dev;
+
+ dev = ops_data->dev_handle;
+ vfio_container_fd =
+ sfc_vdpa_adapter_by_dev_handle(dev)->vfio_container_fd;
+
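+ /* Fetch the guest (VM) memory layout; the returned table is freed below */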
+ rc = rte_vhost_get_mem_table(ops_data->vid, &vhost_mem);
+ if (rc < 0) {
+ sfc_vdpa_err(dev,
+ "failed to get VM memory layout");
+ goto error;
+ }
+
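+ /*
+ * Map (or unmap) every guest memory region in the VFIO container:
+ * vaddr = host_user_addr, iova = guest_phys_addr, len = region size.
+ */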
+ for (i = 0; i < vhost_mem->nregions; i++) {
+ mem_reg = &vhost_mem->regions[i];
+
+ if (do_map) {
+ rc = rte_vfio_container_dma_map(vfio_container_fd,
+ mem_reg->host_user_addr,
+ mem_reg->guest_phys_addr,
+ mem_reg->size);
+ if (rc < 0) {
+ sfc_vdpa_err(dev,
+ "DMA map failed : %s",
+ rte_strerror(rte_errno));
+ goto failed_vfio_dma_map;
+ }
+ } else {
+ rc = rte_vfio_container_dma_unmap(vfio_container_fd,
+ mem_reg->host_user_addr,
+ mem_reg->guest_phys_addr,
+ mem_reg->size);
+ if (rc < 0) {
+ sfc_vdpa_err(dev,
+ "DMA unmap failed : %s",
+ rte_strerror(rte_errno));
+ goto error;
+ }
+ }
+ }
+
+ free(vhost_mem);
+
+ return 0;
+
+failed_vfio_dma_map:
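+ /* Unmap the regions that were successfully mapped before the failure */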
+ for (j = 0; j < i; j++) {
+ mem_reg = &vhost_mem->regions[j];
+ rte_vfio_container_dma_unmap(vfio_container_fd,
+ mem_reg->host_user_addr,
+ mem_reg->guest_phys_addr,
+ mem_reg->size);
+ }
+
+error:
+ free(vhost_mem);
+
+ return rc;
+}
+
static int
sfc_vdpa_mem_bar_init(struct sfc_vdpa_adapter *sva,
const efx_bar_region_t *mem_ebrp)
if (rc != 0)
goto fail_estimate_rsrc_limits;
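+ /* Initialise virtio support in the common (efx) code for this NIC */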
+ sfc_vdpa_log_init(sva, "init virtio");
+ rc = efx_virtio_init(enp);
+ if (rc != 0) {
+ sfc_vdpa_err(sva, "virtio init failed: %s", rte_strerror(rc));
+ goto fail_virtio_init;
+ }
+
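+ /* Initialise filter support in the common (efx) code */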
+ sfc_vdpa_log_init(sva, "init filter");
+ rc = efx_filter_init(enp);
+ if (rc != 0) {
+ sfc_vdpa_err(sva, "filter init failed: %s", rte_strerror(rc));
+ goto fail_filter_init;
+ }
+
sfc_vdpa_log_init(sva, "done");
return 0;
+fail_filter_init:
+ efx_virtio_fini(enp);
+
+fail_virtio_init:
+ efx_nic_fini(enp);
+
fail_estimate_rsrc_limits:
fail_nic_reset:
efx_nic_unprobe(enp);
sfc_vdpa_log_init(sva, "entry");
+ sfc_vdpa_log_init(sva, "virtio fini");
+ efx_virtio_fini(enp);
+
sfc_vdpa_log_init(sva, "unprobe nic");
efx_nic_unprobe(enp);