net/virtio-user: fix return value check
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c
index d3cbdbd..6b19180 100644
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
  */
 
 #include <sys/types.h>
@@ -99,70 +70,109 @@ static uint64_t vhost_req_user_to_kernel[] = {
        [VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
 };
 
-/* By default, vhost kernel module allows 64 regions, but DPDK allows
- * 256 segments. As a relief, below function merges those virtually
- * adjacent memsegs into one region.
+static int
+add_memseg_list(const struct rte_memseg_list *msl, void *arg)
+{
+       struct vhost_memory_kernel *vm = arg;
+       struct vhost_memory_region *mr;
+       void *start_addr;
+       uint64_t len;
+
+       if (msl->external)
+               return 0;
+
+       if (vm->nregions >= max_regions)
+               return -1;
+
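+       /* A memseg list is one VA-contiguous area reserved by DPDK; one
+        * region covers its whole span: memseg_arr.len slots of page_sz
+        * bytes each, whether or not every slot is populated yet.
+        */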
+       start_addr = msl->base_va;
+       len = msl->page_sz * msl->memseg_arr.len;
+
+       mr = &vm->regions[vm->nregions++];
+
+       mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
+       mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
+       mr->memory_size = len;
+       mr->mmap_offset = 0; /* flags_padding */
+
+       PMD_DRV_LOG(DEBUG, "index=%u addr=%p len=%" PRIu64,
+                       vm->nregions - 1, start_addr, len);
+
+       return 0;
+}
+
+/* By default, the vhost kernel module allows 64 regions, but DPDK may
+ * have many more memory regions. The function below treats each
+ * contiguous memory space reserved by DPDK as one region.
  */
 static struct vhost_memory_kernel *
 prepare_vhost_memory_kernel(void)
 {
-       uint32_t i, j, k = 0;
-       struct rte_memseg *seg;
-       struct vhost_memory_region *mr;
        struct vhost_memory_kernel *vm;
 
        vm = malloc(sizeof(struct vhost_memory_kernel) +
-                   max_regions *
-                   sizeof(struct vhost_memory_region));
+                       max_regions *
+                       sizeof(struct vhost_memory_region));
+       if (!vm)
+               return NULL;
 
-       for (i = 0; i < RTE_MAX_MEMSEG; ++i) {
-               seg = &rte_eal_get_configuration()->mem_config->memseg[i];
-               if (!seg->addr)
-                       break;
+       vm->nregions = 0;
+       vm->padding = 0;
 
-               int new_region = 1;
+       /*
+        * The memory lock has already been taken by the memory subsystem
+        * or virtio_user_start_device().
+        */
+       if (rte_memseg_list_walk_thread_unsafe(add_memseg_list, vm) < 0) {
+               free(vm);
+               return NULL;
+       }
 
-               for (j = 0; j < k; ++j) {
-                       mr = &vm->regions[j];
+       return vm;
+}
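+
+/* Hedged usage sketch: vhost_kernel_ioctl() below builds this table for
+ * VHOST_USER_SET_MEM_TABLE and hands it to vhost-net, roughly:
+ *
+ *     struct vhost_memory_kernel *vm = prepare_vhost_memory_kernel();
+ *
+ *     if (vm == NULL)
+ *             return -1;
+ *     ret = ioctl(vhostfd, VHOST_SET_MEM_TABLE, vm);
+ *     free(vm);
+ */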
 
-                       if (mr->userspace_addr + mr->memory_size ==
-                           (uint64_t)(uintptr_t)seg->addr) {
-                               mr->memory_size += seg->len;
-                               new_region = 0;
-                               break;
-                       }
-
-                       if ((uint64_t)(uintptr_t)seg->addr + seg->len ==
-                           mr->userspace_addr) {
-                               mr->guest_phys_addr =
-                                       (uint64_t)(uintptr_t)seg->addr;
-                               mr->userspace_addr =
-                                       (uint64_t)(uintptr_t)seg->addr;
-                               mr->memory_size += seg->len;
-                               new_region = 0;
-                               break;
-                       }
-               }
+/* With the features below, the vhost kernel does not need to do checksum
+ * and TSO; this info is passed to virtio_user through the virtio net
+ * header.
+ */
+#define VHOST_KERNEL_GUEST_OFFLOADS_MASK       \
+       ((1ULL << VIRTIO_NET_F_GUEST_CSUM) |    \
+        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |    \
+        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |    \
+        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |    \
+        (1ULL << VIRTIO_NET_F_GUEST_UFO))
+
+/* With the features below, for flows from virtio_user to the vhost kernel:
+ * (1) if a flow goes up through the kernel networking stack, it does not
+ * need to verify the checksum, which saves CPU cycles;
+ * (2) if a flow goes through a Linux bridge and out via an interface
+ * (kernel driver), checksum and TSO will be done by GSO in the kernel, or
+ * even offloaded into the real physical device.
+ */
+#define VHOST_KERNEL_HOST_OFFLOADS_MASK                \
+       ((1ULL << VIRTIO_NET_F_HOST_TSO4) |     \
+        (1ULL << VIRTIO_NET_F_HOST_TSO6) |     \
+        (1ULL << VIRTIO_NET_F_CSUM))
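+
+/* Hedged sketch of how both masks are used below: cleared from the bits
+ * passed to VHOST_SET_FEATURES (vhost-net does not know them) and added
+ * back after VHOST_GET_FEATURES when the tap supports IFF_VNET_HDR:
+ *
+ *     feat &= ~(VHOST_KERNEL_GUEST_OFFLOADS_MASK |
+ *               VHOST_KERNEL_HOST_OFFLOADS_MASK);   // before SET
+ *     feat |=  VHOST_KERNEL_GUEST_OFFLOADS_MASK |
+ *              VHOST_KERNEL_HOST_OFFLOADS_MASK;     // after GET
+ */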
 
-               if (new_region == 0)
-                       continue;
+static int
+tap_support_features(unsigned int *tap_features)
+{
+       int tapfd;
 
-               mr = &vm->regions[k++];
-               /* use vaddr here! */
-               mr->guest_phys_addr = (uint64_t)(uintptr_t)seg->addr;
-               mr->userspace_addr = (uint64_t)(uintptr_t)seg->addr;
-               mr->memory_size = seg->len;
-               mr->mmap_offset = 0;
+       tapfd = open(PATH_NET_TUN, O_RDWR);
+       if (tapfd < 0) {
+               PMD_DRV_LOG(ERR, "fail to open %s: %s",
+                           PATH_NET_TUN, strerror(errno));
+               return -1;
+       }
 
-               if (k >= max_regions) {
-                       free(vm);
-                       return NULL;
-               }
+       if (ioctl(tapfd, TUNGETFEATURES, tap_features) == -1) {
+               PMD_DRV_LOG(ERR, "TUNGETFEATURES failed: %s", strerror(errno));
+               close(tapfd);
+               return -1;
        }
 
-       vm->nregions = k;
-       vm->padding = 0;
-       return vm;
+       close(tapfd);
+       return 0;
 }
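+
+/* Hedged usage sketch for the probe above; error handling beyond the
+ * return-value check is elided:
+ *
+ *     unsigned int feats = 0;
+ *
+ *     if (tap_support_features(&feats) == 0 && (feats & IFF_MULTI_QUEUE))
+ *             ; // the tap backend can expose multiple queue pairs
+ */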
 
 static int
@@ -174,6 +184,9 @@ vhost_kernel_ioctl(struct virtio_user_dev *dev,
        unsigned int i;
        uint64_t req_kernel;
        struct vhost_memory_kernel *vm = NULL;
+       int vhostfd;
+       unsigned int queue_sel;
+       unsigned int features;
 
        PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
 
@@ -186,17 +199,62 @@ vhost_kernel_ioctl(struct virtio_user_dev *dev,
                arg = (void *)vm;
        }
 
-       /* We don't need memory protection here */
-       if (req_kernel == VHOST_SET_FEATURES)
+       if (req_kernel == VHOST_SET_FEATURES) {
+               /* We don't need memory protection here */
                *(uint64_t *)arg &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
 
-       for (i = 0; i < dev->max_queue_pairs; ++i) {
-               if (dev->vhostfds[i] < 0)
-                       continue;
+               /* The vhost kernel module does not know about the flags below */
+               *(uint64_t *)arg &= ~VHOST_KERNEL_GUEST_OFFLOADS_MASK;
+               *(uint64_t *)arg &= ~VHOST_KERNEL_HOST_OFFLOADS_MASK;
+
+               *(uint64_t *)arg &= ~(1ULL << VIRTIO_NET_F_MQ);
+       }
+
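+       /* Per-vring requests carry a global ring index in *arg: queue pair
+        * N owns rings 2N and 2N+1, so e.g. queue_sel = 5 selects
+        * vhostfds[2] and is rewritten to ring 1 of that vhost device.
+        */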
+       switch (req_kernel) {
+       case VHOST_SET_VRING_NUM:
+       case VHOST_SET_VRING_ADDR:
+       case VHOST_SET_VRING_BASE:
+       case VHOST_GET_VRING_BASE:
+       case VHOST_SET_VRING_KICK:
+       case VHOST_SET_VRING_CALL:
+               queue_sel = *(unsigned int *)arg;
+               vhostfd = dev->vhostfds[queue_sel / 2];
+               *(unsigned int *)arg = queue_sel % 2;
+               PMD_DRV_LOG(DEBUG, "vhostfd=%d, index=%u",
+                           vhostfd, *(unsigned int *)arg);
+               break;
+       default:
+               vhostfd = -1;
+       }
+       if (vhostfd == -1) {
+               for (i = 0; i < dev->max_queue_pairs; ++i) {
+                       if (dev->vhostfds[i] < 0)
+                               continue;
+
+                       ret = ioctl(dev->vhostfds[i], req_kernel, arg);
+                       if (ret < 0)
+                               break;
+               }
+       } else {
+               ret = ioctl(vhostfd, req_kernel, arg);
+       }
+
+       if (!ret && req_kernel == VHOST_GET_FEATURES &&
+                       tap_support_features(&features) == 0) {
+               /* With tap as the backend, all these features are supported
+                * but not claimed by vhost-net, so we add them back when
+                * reporting to the upper layer.
+                */
+               if (features & IFF_VNET_HDR) {
+                       *((uint64_t *)arg) |= VHOST_KERNEL_GUEST_OFFLOADS_MASK;
+                       *((uint64_t *)arg) |= VHOST_KERNEL_HOST_OFFLOADS_MASK;
+               }
 
-               ret = ioctl(dev->vhostfds[i], req_kernel, arg);
-               if (ret < 0)
-                       break;
+               /* vhost_kernel does not declare this feature, but it does
+                * support multi-queue.
+                */
+               if (features & IFF_MULTI_QUEUE)
+                       *(uint64_t *)arg |= (1ull << VIRTIO_NET_F_MQ);
        }
 
        if (vm)
@@ -269,11 +327,12 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
        int hdr_size;
        int vhostfd;
        int tapfd;
+       int req_mq = (dev->max_queue_pairs > 1);
 
        vhostfd = dev->vhostfds[pair_idx];
 
        if (!enable) {
-               if (dev->tapfds[pair_idx]) {
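+               /* fd 0 is a valid descriptor and closed tap fds are reset
+                * to -1, so test for >= 0 rather than non-zero.
+                */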
+               if (dev->tapfds[pair_idx] >= 0) {
                        close(dev->tapfds[pair_idx]);
                        dev->tapfds[pair_idx] = -1;
                }
@@ -288,7 +347,8 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
        else
                hdr_size = sizeof(struct virtio_net_hdr);
 
-       tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size);
+       tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq,
+                        (char *)dev->mac_addr, dev->features);
        if (tapfd < 0) {
                PMD_DRV_LOG(ERR, "fail to open tap for vhost kernel");
                return -1;
@@ -304,7 +364,7 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
        return 0;
 }
 
-struct virtio_user_backend_ops ops_kernel = {
+struct virtio_user_backend_ops virtio_ops_kernel = {
        .setup = vhost_kernel_setup,
        .send_request = vhost_kernel_ioctl,
        .enable_qp = vhost_kernel_enable_queue_pair