vdpa/mlx5: add task ring for multi-thread management
[dpdk.git] / drivers / vdpa / sfc / sfc_vdpa_hw.c
index 5bf4e43..a7018b1 100644 (file)
@@ -7,6 +7,7 @@
 #include <rte_common.h>
 #include <rte_errno.h>
 #include <rte_vfio.h>
+#include <rte_vhost.h>
 
 #include "efx.h"
 #include "sfc_vdpa.h"
@@ -24,21 +25,30 @@ sfc_vdpa_dma_alloc(struct sfc_vdpa_adapter *sva, const char *name,
 {
        uint64_t mcdi_iova;
        size_t mcdi_buff_size;
+       char mz_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz = NULL;
        int numa_node = sva->pdev->device.numa_node;
        int ret;
 
        mcdi_buff_size = RTE_ALIGN_CEIL(len, PAGE_SIZE);
+       ret = snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%s",
+                      sva->pdev->name, name);
+       if (ret < 0 || ret >= RTE_MEMZONE_NAMESIZE) {
+               sfc_vdpa_err(sva, "%s_%s too long to fit in mz_name",
+                            sva->pdev->name, name);
+               return -EINVAL;
+       }
 
-       sfc_vdpa_log_init(sva, "name=%s, len=%zu", name, len);
+       sfc_vdpa_log_init(sva, "name=%s, len=%zu", mz_name, len);
 
-       mz = rte_memzone_reserve_aligned(name, mcdi_buff_size,
+       mz = rte_memzone_reserve_aligned(mz_name, mcdi_buff_size,
                                         numa_node,
                                         RTE_MEMZONE_IOVA_CONTIG,
                                         PAGE_SIZE);
        if (mz == NULL) {
                sfc_vdpa_err(sva, "cannot reserve memory for %s: len=%#x: %s",
-                            name, (unsigned int)len, rte_strerror(rte_errno));
+                            mz_name, (unsigned int)len,
+                            rte_strerror(rte_errno));
                return -ENOMEM;
        }
 
@@ -107,6 +117,74 @@ sfc_vdpa_dma_free(struct sfc_vdpa_adapter *sva, efsys_mem_t *esmp)
        memset(esmp, 0, sizeof(*esmp));
 }
 
+/*
+ * Map (do_map == true) or unmap (do_map == false) every guest memory
+ * region reported by vhost into/out of the adapter's VFIO container,
+ * so the device can DMA directly to/from guest memory.
+ *
+ * Returns 0 on success, or the negative return code of the failing
+ * rte_* call.  On a map failure, all regions mapped so far are rolled
+ * back (unmapped).  NOTE(review): on an UNMAP failure the function
+ * bails out immediately, leaving the remaining regions still mapped —
+ * confirm callers tolerate this partial-unmap state.
+ */
+int
+sfc_vdpa_dma_map(struct sfc_vdpa_ops_data *ops_data, bool do_map)
+{
+       uint32_t i, j;
+       int rc;
+       struct rte_vhost_memory *vhost_mem = NULL;
+       struct rte_vhost_mem_region *mem_reg = NULL;
+       int vfio_container_fd;
+       void *dev;
+
+       dev = ops_data->dev_handle;
+       vfio_container_fd =
+               sfc_vdpa_adapter_by_dev_handle(dev)->vfio_container_fd;
+
+       /*
+        * Fetch the guest's memory layout.  vhost_mem is allocated by
+        * this call and owned by us; it is freed on every exit path
+        * below (free(NULL) is a no-op if the call itself failed).
+        */
+       rc = rte_vhost_get_mem_table(ops_data->vid, &vhost_mem);
+       if (rc < 0) {
+               sfc_vdpa_err(dev,
+                            "failed to get VM memory layout");
+               goto error;
+       }
+
+       /* Walk every region and apply the requested (un)map operation. */
+       for (i = 0; i < vhost_mem->nregions; i++) {
+               mem_reg = &vhost_mem->regions[i];
+
+               if (do_map) {
+                       rc = rte_vfio_container_dma_map(vfio_container_fd,
+                                               mem_reg->host_user_addr,
+                                               mem_reg->guest_phys_addr,
+                                               mem_reg->size);
+                       if (rc < 0) {
+                               sfc_vdpa_err(dev,
+                                            "DMA map failed : %s",
+                                            rte_strerror(rte_errno));
+                               /* Roll back regions [0, i) before returning. */
+                               goto failed_vfio_dma_map;
+                       }
+               } else {
+                       rc = rte_vfio_container_dma_unmap(vfio_container_fd,
+                                               mem_reg->host_user_addr,
+                                               mem_reg->guest_phys_addr,
+                                               mem_reg->size);
+                       if (rc < 0) {
+                               sfc_vdpa_err(dev,
+                                            "DMA unmap failed : %s",
+                                            rte_strerror(rte_errno));
+                               /* No rollback: regions [i+1, n) stay mapped. */
+                               goto error;
+                       }
+               }
+       }
+
+       free(vhost_mem);
+
+       return 0;
+
+failed_vfio_dma_map:
+       /* Undo the mappings that succeeded before region i failed. */
+       for (j = 0; j < i; j++) {
+               mem_reg = &vhost_mem->regions[j];
+               rte_vfio_container_dma_unmap(vfio_container_fd,
+                                            mem_reg->host_user_addr,
+                                            mem_reg->guest_phys_addr,
+                                            mem_reg->size);
+       }
+
+error:
+       free(vhost_mem);
+
+       return rc;
+}
+
+
 static int
 sfc_vdpa_mem_bar_init(struct sfc_vdpa_adapter *sva,
                      const efx_bar_region_t *mem_ebrp)
@@ -283,10 +361,20 @@ sfc_vdpa_hw_init(struct sfc_vdpa_adapter *sva)
                goto fail_virtio_init;
        }
 
+       sfc_vdpa_log_init(sva, "init filter");
+       rc = efx_filter_init(enp);
+       if (rc != 0) {
+               sfc_vdpa_err(sva, "filter init failed: %s", rte_strerror(rc));
+               goto fail_filter_init;
+       }
+
        sfc_vdpa_log_init(sva, "done");
 
        return 0;
 
+fail_filter_init:
+       efx_virtio_fini(enp);
+
 fail_virtio_init:
        efx_nic_fini(enp);