vhost: replace vDPA device ID in Vhost
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 91482cc..3405cd8 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -191,7 +191,7 @@ vhost_backend_cleanup(struct virtio_net *dev)
                dev->mem = NULL;
        }
 
-       free(dev->guest_pages);
+       rte_free(dev->guest_pages);
        dev->guest_pages = NULL;
 
        if (dev->log_addr) {
@@ -206,7 +206,7 @@ vhost_backend_cleanup(struct virtio_net *dev)
                        dev->inflight_info->addr = NULL;
                }
 
-               if (dev->inflight_info->fd > 0) {
+               if (dev->inflight_info->fd >= 0) {
                        close(dev->inflight_info->fd);
                        dev->inflight_info->fd = -1;
                }
@@ -315,7 +315,6 @@ vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
        uint64_t features = msg->payload.u64;
        uint64_t vhost_features = 0;
        struct rte_vdpa_device *vdpa_dev;
-       int did = -1;
 
        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;
@@ -384,8 +383,7 @@ vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
                }
        }
 
-       did = dev->vdpa_dev_id;
-       vdpa_dev = rte_vdpa_get_device(did);
+       vdpa_dev = dev->vdpa_dev;
        if (vdpa_dev && vdpa_dev->ops->set_features)
                vdpa_dev->ops->set_features(dev->vid);
 
@@ -656,13 +654,11 @@ ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 {
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
                uint64_t vva;
-               uint64_t req_size = *size;
 
-               vva = vhost_user_iotlb_cache_find(vq, ra,
+               vhost_user_iotlb_rd_lock(vq);
+               vva = vhost_iova_to_vva(dev, vq, ra,
                                        size, VHOST_ACCESS_RW);
-               if (req_size != *size)
-                       vhost_user_iotlb_miss(dev, (ra + *size),
-                                             VHOST_ACCESS_RW);
+               vhost_user_iotlb_rd_unlock(vq);
 
                return vva;
        }
@@ -670,37 +666,16 @@ ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
        return qva_to_vva(dev, ra, size);
 }
 
-/*
- * Converts vring log address to GPA
- * If IOMMU is enabled, the log address is IOVA
- * If IOMMU not enabled, the log address is already GPA
- */
 static uint64_t
-translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
-               uint64_t log_addr)
+log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
-       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
-               const uint64_t exp_size = sizeof(struct vring_used) +
-                       sizeof(struct vring_used_elem) * vq->size;
-               uint64_t hva, gpa;
-               uint64_t size = exp_size;
-
-               hva = vhost_iova_to_vva(dev, vq, log_addr,
-                                       &size, VHOST_ACCESS_RW);
-               if (size != exp_size)
-                       return 0;
+       uint64_t log_gpa;
 
-               gpa = hva_to_gpa(dev, hva, exp_size);
-               if (!gpa) {
-                       VHOST_LOG_CONFIG(ERR,
-                               "VQ: Failed to find GPA for log_addr: 0x%" PRIx64 " hva: 0x%" PRIx64 "\n",
-                               log_addr, hva);
-                       return 0;
-               }
-               return gpa;
+       vhost_user_iotlb_rd_lock(vq);
+       log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr);
+       vhost_user_iotlb_rd_unlock(vq);
 
-       } else
-               return log_addr;
+       return log_gpa;
 }
 
 static struct virtio_net *
@@ -712,7 +687,7 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
 
        if (addr->flags & (1 << VHOST_VRING_F_LOG)) {
                vq->log_guest_addr =
-                       translate_log_addr(dev, vq, addr->log_guest_addr);
+                       log_addr_to_gpa(dev, vq);
                if (vq->log_guest_addr == 0) {
                        VHOST_LOG_CONFIG(DEBUG,
                                "(%d) failed to map log_guest_addr.\n",
@@ -926,11 +901,12 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
        if (dev->nr_guest_pages == dev->max_guest_pages) {
                dev->max_guest_pages *= 2;
                old_pages = dev->guest_pages;
-               dev->guest_pages = realloc(dev->guest_pages,
-                                       dev->max_guest_pages * sizeof(*page));
-               if (!dev->guest_pages) {
+               dev->guest_pages = rte_realloc(dev->guest_pages,
+                                       dev->max_guest_pages * sizeof(*page),
+                                       RTE_CACHE_LINE_SIZE);
+               if (dev->guest_pages == NULL) {
                        VHOST_LOG_CONFIG(ERR, "cannot realloc guest_pages\n");
-                       free(old_pages);
+                       rte_free(old_pages);
                        return -1;
                }
        }
@@ -987,6 +963,12 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
                reg_size -= size;
        }
 
+       /* sort guest page array if over binary search threshold */
+       if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
+               qsort((void *)dev->guest_pages, dev->nr_guest_pages,
+                       sizeof(struct guest_page), guest_page_addrcmp);
+       }
+
        return 0;
 }
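
Keeping dev->guest_pages sorted by guest physical address is what lets the hot guest-physical-to-host translation switch to binary search once the page count crosses VHOST_BINARY_SEARCH_THRESH. Below is a minimal sketch of the comparator and of a lookup over the sorted array, assuming struct guest_page carries guest_phys_addr, host_phys_addr and size fields; the real guest_page_addrcmp and lookup helpers live in vhost.h, so this is illustrative only.

#include <stdint.h>
#include <stdlib.h>	/* qsort() */

struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};

/* Order pages by guest physical address, as required by the qsort() above. */
static int
guest_page_addrcmp(const void *p1, const void *p2)
{
	const struct guest_page *a = p1;
	const struct guest_page *b = p2;

	if (a->guest_phys_addr > b->guest_phys_addr)
		return 1;
	if (a->guest_phys_addr < b->guest_phys_addr)
		return -1;
	return 0;
}

/* Binary search for the page backing gpa; returns 0 if gpa is unmapped. */
static uint64_t
gpa_to_hpa_sorted(const struct guest_page *pages, uint32_t nr_pages,
		uint64_t gpa)
{
	uint32_t lo = 0, hi = nr_pages;

	while (lo < hi) {
		uint32_t mid = lo + (hi - lo) / 2;
		const struct guest_page *page = &pages[mid];

		if (gpa < page->guest_phys_addr)
			hi = mid;
		else if (gpa >= page->guest_phys_addr + page->size)
			lo = mid + 1;
		else
			return page->host_phys_addr +
				gpa - page->guest_phys_addr;
	}
	return 0;
}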
 
@@ -1085,10 +1067,12 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        vhost_user_iotlb_flush_all(dev->virtqueue[i]);
 
        dev->nr_guest_pages = 0;
-       if (!dev->guest_pages) {
+       if (dev->guest_pages == NULL) {
                dev->max_guest_pages = 8;
-               dev->guest_pages = malloc(dev->max_guest_pages *
-                                               sizeof(struct guest_page));
+               dev->guest_pages = rte_zmalloc(NULL,
+                                       dev->max_guest_pages *
+                                       sizeof(struct guest_page),
+                                       RTE_CACHE_LINE_SIZE);
                if (dev->guest_pages == NULL) {
                        VHOST_LOG_CONFIG(ERR,
                                "(%d) failed to allocate memory "
@@ -1431,6 +1415,7 @@ vhost_user_get_inflight_fd(struct virtio_net **pdev,
                                "failed to alloc dev inflight area\n");
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
+               dev->inflight_info->fd = -1;
        }
 
        num_queues = msg->payload.inflight.num_queues;
@@ -1456,6 +1441,16 @@ vhost_user_get_inflight_fd(struct virtio_net **pdev,
        }
        memset(addr, 0, mmap_size);
 
+       if (dev->inflight_info->addr) {
+               munmap(dev->inflight_info->addr, dev->inflight_info->size);
+               dev->inflight_info->addr = NULL;
+       }
+
+       if (dev->inflight_info->fd >= 0) {
+               close(dev->inflight_info->fd);
+               dev->inflight_info->fd = -1;
+       }
+
        dev->inflight_info->addr = addr;
        dev->inflight_info->size = msg->payload.inflight.mmap_size = mmap_size;
        dev->inflight_info->fd = msg->fds[0] = fd;
@@ -1538,10 +1533,13 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
                                "failed to alloc dev inflight area\n");
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
+               dev->inflight_info->fd = -1;
        }
 
-       if (dev->inflight_info->addr)
+       if (dev->inflight_info->addr) {
                munmap(dev->inflight_info->addr, dev->inflight_info->size);
+               dev->inflight_info->addr = NULL;
+       }
 
        addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, mmap_offset);
@@ -1550,8 +1548,10 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
                return RTE_VHOST_MSG_RESULT_ERR;
        }
 
-       if (dev->inflight_info->fd)
+       if (dev->inflight_info->fd >= 0) {
                close(dev->inflight_info->fd);
+               dev->inflight_info->fd = -1;
+       }
 
        dev->inflight_info->fd = fd;
        dev->inflight_info->addr = addr;
@@ -1645,8 +1645,11 @@ vhost_check_queue_inflights_split(struct virtio_net *dev,
            (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
                return RTE_VHOST_MSG_RESULT_OK;
 
+       /* The frontend may still not support the inflight feature
+        * even though the protocol feature was negotiated.
+        */
        if ((!vq->inflight_split))
-               return RTE_VHOST_MSG_RESULT_ERR;
+               return RTE_VHOST_MSG_RESULT_OK;
 
        if (!vq->inflight_split->version) {
                vq->inflight_split->version = INFLIGHT_VERSION;
@@ -1726,8 +1729,11 @@ vhost_check_queue_inflights_packed(struct virtio_net *dev,
            (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
                return RTE_VHOST_MSG_RESULT_OK;
 
+       /* The frontend may still not support the inflight feature
+        * even though the protocol feature was negotiated.
+        */
        if ((!vq->inflight_packed))
-               return RTE_VHOST_MSG_RESULT_ERR;
+               return RTE_VHOST_MSG_RESULT_OK;
 
        if (!vq->inflight_packed->version) {
                vq->inflight_packed->version = INFLIGHT_VERSION;
@@ -1963,7 +1969,6 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
        int enable = (int)msg->payload.state.num;
        int index = (int)msg->payload.state.index;
        struct rte_vdpa_device *vdpa_dev;
-       int did = -1;
 
        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;
@@ -1972,8 +1977,7 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
                "set queue enable: %d to qp idx: %d\n",
                enable, index);
 
-       did = dev->vdpa_dev_id;
-       vdpa_dev = rte_vdpa_get_device(did);
+       vdpa_dev = dev->vdpa_dev;
        if (vdpa_dev && vdpa_dev->ops->set_vring_state)
                vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
 
@@ -2004,15 +2008,6 @@ vhost_user_get_protocol_features(struct virtio_net **pdev,
        rte_vhost_driver_get_features(dev->ifname, &features);
        rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
 
-       /*
-        * REPLY_ACK protocol feature is only mandatory for now
-        * for IOMMU feature. If IOMMU is explicitly disabled by the
-        * application, disable also REPLY_ACK feature for older buggy
-        * Qemu versions (from v2.7.0 to v2.9.0).
-        */
-       if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
-               protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK);
-
        msg->payload.u64 = protocol_features;
        msg->size = sizeof(msg->payload.u64);
        msg->fd_num = 0;
@@ -2076,10 +2071,10 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
        size = msg->payload.log.mmap_size;
        off  = msg->payload.log.mmap_offset;
 
-       /* Don't allow mmap_offset to point outside the mmap region */
-       if (off > size) {
+       /* Check for mmap size and offset overflow. */
+       if (off >= -size) {
                VHOST_LOG_CONFIG(ERR,
-                       "log offset %#"PRIx64" exceeds log size %#"PRIx64"\n",
+                       "log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
                        off, size);
                return RTE_VHOST_MSG_RESULT_ERR;
        }
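
The rewritten test leans on unsigned wrap-around: both off and size are 64-bit unsigned, so -size evaluates to UINT64_MAX - size + 1 and off >= -size holds exactly when off + size would overflow (a zero size is rejected as well, since -0 is 0 and off >= 0 always holds). A small self-contained check of that equivalence, illustrative only:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t size = 0x1000;
	uint64_t off = UINT64_MAX - 0xfff;	/* off + size wraps to 0 */

	assert(off + size < off);	/* the sum overflows ... */
	assert(off >= -size);		/* ... and the new check rejects it */

	off = UINT64_MAX - 0x1000;	/* largest offset that still fits */
	assert(off + size >= off);	/* no overflow ... */
	assert(off < -size);		/* ... and the new check accepts it */

	return 0;
}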
@@ -2148,7 +2143,6 @@ vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct virtio_net *dev = *pdev;
        uint8_t *mac = (uint8_t *)&msg->payload.u64;
        struct rte_vdpa_device *vdpa_dev;
-       int did = -1;
 
        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;
@@ -2162,13 +2156,11 @@ vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
         * Set the flag to inject a RARP broadcast packet at
         * rte_vhost_dequeue_burst().
         *
-        * rte_smp_wmb() is for making sure the mac is copied
-        * before the flag is set.
+        * The __ATOMIC_RELEASE ordering ensures the MAC address is
+        * copied before the flag is set.
         */
-       rte_smp_wmb();
-       rte_atomic16_set(&dev->broadcast_rarp, 1);
-       did = dev->vdpa_dev_id;
-       vdpa_dev = rte_vdpa_get_device(did);
+       __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);
+       vdpa_dev = dev->vdpa_dev;
        if (vdpa_dev && vdpa_dev->ops->migration_done)
                vdpa_dev->ops->migration_done(dev->vid);
 
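
Replacing the rte_smp_wmb()/rte_atomic16_set() pair with a single release store only orders the writer side; the dequeue path (not part of this patch) still needs a matching acquire on broadcast_rarp for the MAC bytes to be guaranteed visible. A hypothetical consumer-side sketch using the same GCC atomic builtins:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper, not taken from this patch: atomically claim a pending
 * RARP request. The ACQUIRE on success pairs with the RELEASE store in
 * vhost_user_send_rarp(), so the MAC copied before the flag was set is
 * visible to the caller.
 */
static bool
claim_pending_rarp(int16_t *broadcast_rarp)
{
	int16_t expected = 1;

	return __atomic_compare_exchange_n(broadcast_rarp, &expected, 0,
			false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}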
@@ -2245,6 +2237,13 @@ is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
        if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
                return 1;
 
+       if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
+               len = sizeof(uint64_t);
+               if (ra->log_guest_addr < end &&
+                   (ra->log_guest_addr + len) > start)
+                       return 1;
+       }
+
        return 0;
 }
 
@@ -2270,6 +2269,13 @@ is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
        if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
                return 1;
 
+       if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
+               len = sizeof(uint64_t);
+               if (ra->log_guest_addr < end &&
+                   (ra->log_guest_addr + len) > start)
+                       return 1;
+       }
+
        return 0;
 }
 
@@ -2529,7 +2535,7 @@ static int
 vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
                        struct VhostUserMsg *msg)
 {
-       uint16_t vring_idx;
+       uint32_t vring_idx;
 
        switch (msg->request.master) {
        case VHOST_USER_SET_VRING_KICK:
@@ -2601,7 +2607,6 @@ vhost_user_msg_handler(int vid, int fd)
        struct virtio_net *dev;
        struct VhostUserMsg msg;
        struct rte_vdpa_device *vdpa_dev;
-       int did = -1;
        int ret;
        int unlock_required = 0;
        bool handled;
@@ -2793,8 +2798,7 @@ skip_to_post_handle:
                }
        }
 
-       did = dev->vdpa_dev_id;
-       vdpa_dev = rte_vdpa_get_device(did);
+       vdpa_dev = dev->vdpa_dev;
        if (vdpa_dev && virtio_is_ready(dev) &&
                        !(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
                        msg.request.master == VHOST_USER_SET_VRING_CALL) {
@@ -2815,11 +2819,19 @@ static int process_slave_message_reply(struct virtio_net *dev,
        if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
                return 0;
 
-       if (read_vhost_message(dev->slave_req_fd, &msg_reply) < 0) {
+       ret = read_vhost_message(dev->slave_req_fd, &msg_reply);
+       if (ret <= 0) {
+               if (ret < 0)
+                       VHOST_LOG_CONFIG(ERR,
+                               "vhost read slave message reply failed\n");
+               else
+                       VHOST_LOG_CONFIG(INFO,
+                               "vhost peer closed\n");
                ret = -1;
                goto out;
        }
 
+       ret = 0;
        if (msg_reply.request.slave != msg->request.slave) {
                VHOST_LOG_CONFIG(ERR,
                        "Received unexpected msg type (%u), expected %u\n",
@@ -2935,7 +2947,7 @@ int rte_vhost_host_notifier_ctrl(int vid, bool enable)
 {
        struct virtio_net *dev;
        struct rte_vdpa_device *vdpa_dev;
-       int vfio_device_fd, did, ret = 0;
+       int vfio_device_fd, ret = 0;
        uint64_t offset, size;
        unsigned int i;
 
@@ -2943,9 +2955,9 @@ int rte_vhost_host_notifier_ctrl(int vid, bool enable)
        if (!dev)
                return -ENODEV;
 
-       did = dev->vdpa_dev_id;
-       if (did < 0)
-               return -EINVAL;
+       vdpa_dev = dev->vdpa_dev;
+       if (vdpa_dev == NULL)
+               return -ENODEV;
 
        if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
            !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
@@ -2957,10 +2969,6 @@ int rte_vhost_host_notifier_ctrl(int vid, bool enable)
                        (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
                return -ENOTSUP;
 
-       vdpa_dev = rte_vdpa_get_device(did);
-       if (!vdpa_dev)
-               return -ENODEV;
-
        RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);