net/virtio-user: fix packed ring server mode
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c
index a93fe5b..2c80507 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel.c
@@ -8,7 +8,6 @@
 #include <unistd.h>
 
 #include <rte_memory.h>
-#include <rte_eal_memconfig.h>
 
 #include "vhost.h"
 #include "virtio_user_dev.h"
@@ -70,41 +69,44 @@ static uint64_t vhost_req_user_to_kernel[] = {
        [VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
 };
 
-struct walk_arg {
-       struct vhost_memory_kernel *vm;
-       uint32_t region_nr;
-};
 static int
-add_memory_region(const struct rte_memseg_list *msl __rte_unused,
-               const struct rte_memseg *ms, size_t len, void *arg)
+add_memseg_list(const struct rte_memseg_list *msl, void *arg)
 {
-       struct walk_arg *wa = arg;
+       struct vhost_memory_kernel *vm = arg;
        struct vhost_memory_region *mr;
        void *start_addr;
+       uint64_t len;
+
+       if (msl->external)
+               return 0;
 
-       if (wa->region_nr >= max_regions)
+       if (vm->nregions >= max_regions)
                return -1;
 
-       mr = &wa->vm->regions[wa->region_nr++];
-       start_addr = ms->addr;
+       start_addr = msl->base_va;
+       len = msl->page_sz * msl->memseg_arr.len;
+
+       mr = &vm->regions[vm->nregions++];
 
        mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
        mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
        mr->memory_size = len;
-       mr->mmap_offset = 0;
+       mr->mmap_offset = 0; /* flags_padding */
+
+       PMD_DRV_LOG(DEBUG, "index=%u addr=%p len=%" PRIu64,
+                       vm->nregions - 1, start_addr, len);
 
        return 0;
 }
 
-/* By default, vhost kernel module allows 64 regions, but DPDK allows
- * 256 segments. As a relief, below function merges those virtually
- * adjacent memsegs into one region.
+/* By default, the vhost kernel module allows 64 regions, but DPDK may
+ * have many more memory regions. The function below treats each
+ * contiguous memory space reserved by DPDK as one region.
  */
 static struct vhost_memory_kernel *
 prepare_vhost_memory_kernel(void)
 {
        struct vhost_memory_kernel *vm;
-       struct walk_arg wa;
 
        vm = malloc(sizeof(struct vhost_memory_kernel) +
                        max_regions *
@@ -112,20 +114,18 @@ prepare_vhost_memory_kernel(void)
        if (!vm)
                return NULL;
 
-       wa.region_nr = 0;
-       wa.vm = vm;
+       vm->nregions = 0;
+       vm->padding = 0;
 
        /*
         * The memory lock has already been taken by memory subsystem
         * or virtio_user_start_device().
         */
-       if (rte_memseg_contig_walk_thread_unsafe(add_memory_region, &wa) < 0) {
+       if (rte_memseg_list_walk_thread_unsafe(add_memseg_list, vm) < 0) {
                free(vm);
                return NULL;
        }
 
-       vm->nregions = wa.region_nr;
-       vm->padding = 0;
        return vm;
 }
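
For context, the switch from rte_memseg_contig_walk_thread_unsafe() to rte_memseg_list_walk_thread_unsafe() matters because the number of memseg lists is small and fixed, while the number of contiguous segment chunks grows with hotplug and fragmentation and can exceed the kernel's 64-region limit. Below is an illustrative, standalone sketch (not part of this patch; count_list(), count_contig() and compare_region_counts() are made-up names) that contrasts the two walk granularities using the public, locking variants of the same DPDK APIs.

    #include <stdio.h>

    #include <rte_common.h>
    #include <rte_memory.h>

    /* One callback invocation per memseg list. */
    static int
    count_list(const struct rte_memseg_list *msl, void *arg)
    {
            unsigned int *n = arg;

            if (!msl->external)
                    (*n)++;
            return 0;
    }

    /* One callback invocation per VA-contiguous run of allocated memsegs. */
    static int
    count_contig(const struct rte_memseg_list *msl __rte_unused,
                    const struct rte_memseg *ms __rte_unused,
                    size_t len __rte_unused, void *arg)
    {
            unsigned int *n = arg;

            (*n)++;
            return 0;
    }

    static void
    compare_region_counts(void)
    {
            unsigned int lists = 0, chunks = 0;

            rte_memseg_list_walk(count_list, &lists);
            rte_memseg_contig_walk(count_contig, &chunks);

            /* "lists" is bounded by RTE_MAX_MEMSEG_LISTS; "chunks" is not. */
            printf("memseg lists: %u, contiguous chunks: %u\n", lists, chunks);
    }
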
 
@@ -330,16 +330,34 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
 
        vhostfd = dev->vhostfds[pair_idx];
 
+       if (dev->qp_enabled[pair_idx] == enable)
+               return 0;
+
        if (!enable) {
-               if (dev->tapfds[pair_idx] >= 0) {
-                       close(dev->tapfds[pair_idx]);
-                       dev->tapfds[pair_idx] = -1;
+               tapfd = dev->tapfds[pair_idx];
+               if (vhost_kernel_set_backend(vhostfd, -1) < 0) {
+                       PMD_DRV_LOG(ERR, "fail to set backend for vhost kernel");
+                       return -1;
                }
-               return vhost_kernel_set_backend(vhostfd, -1);
-       } else if (dev->tapfds[pair_idx] >= 0) {
+               if (req_mq && vhost_kernel_tap_set_queue(tapfd, false) < 0) {
+                       PMD_DRV_LOG(ERR, "fail to disable tap for vhost kernel");
+                       return -1;
+               }
+               dev->qp_enabled[pair_idx] = false;
                return 0;
        }
 
+       if (dev->tapfds[pair_idx] >= 0) {
+               tapfd = dev->tapfds[pair_idx];
+               if (vhost_kernel_tap_set_offload(tapfd, dev->features) == -1)
+                       return -1;
+               if (req_mq && vhost_kernel_tap_set_queue(tapfd, true) < 0) {
+                       PMD_DRV_LOG(ERR, "fail to enable tap for vhost kernel");
+                       return -1;
+               }
+               goto set_backend;
+       }
+
        if ((dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ||
            (dev->features & (1ULL << VIRTIO_F_VERSION_1)))
                hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
@@ -353,17 +371,19 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
                return -1;
        }
 
+       dev->tapfds[pair_idx] = tapfd;
+
+set_backend:
        if (vhost_kernel_set_backend(vhostfd, tapfd) < 0) {
                PMD_DRV_LOG(ERR, "fail to set backend for vhost kernel");
-               close(tapfd);
                return -1;
        }
 
-       dev->tapfds[pair_idx] = tapfd;
+       dev->qp_enabled[pair_idx] = true;
        return 0;
 }
 
-struct virtio_user_backend_ops ops_kernel = {
+struct virtio_user_backend_ops virtio_ops_kernel = {
        .setup = vhost_kernel_setup,
        .send_request = vhost_kernel_ioctl,
        .enable_qp = vhost_kernel_enable_queue_pair
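
The hunk above also introduces per-queue-pair state tracking so that repeated enable or disable requests, as happen on server-mode reconnects, become no-ops rather than re-running tap and backend setup. A minimal sketch of that guard follows (not from this patch; qp_state and qp_set_enabled() are made-up names).

    #include <stdbool.h>

    struct qp_state {
            bool enabled;   /* mirrors dev->qp_enabled[pair_idx] in the patch */
    };

    static int
    qp_set_enabled(struct qp_state *qp, bool enable,
                    int (*do_enable)(void *), int (*do_disable)(void *), void *ctx)
    {
            if (qp->enabled == enable)
                    return 0;       /* already in the requested state */

            if ((enable ? do_enable(ctx) : do_disable(ctx)) < 0)
                    return -1;      /* keep the recorded state on failure */

            qp->enabled = enable;
            return 0;
    }
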