vhost: fix possible denial of service by leaking FDs
Author:     Maxime Coquelin <maxime.coquelin@redhat.com>
AuthorDate: Tue, 3 Sep 2019 15:34:22 +0000 (17:34 +0200)
Commit:     David Marchand <david.marchand@redhat.com>
CommitDate: Tue, 12 Nov 2019 11:21:20 +0000 (12:21 +0100)
A malicious vhost-user master could send, in a loop, hand-crafted
vhost-user messages containing more file descriptors than the
vhost-user slave expects. Doing so causes the application using
the vhost-user library to run out of FDs.

This issue has been assigned CVE-2019-14818.

Fixes: 8f972312b8f4 ("vhost: support vhost-user")

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
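
As context for the diff below, here is a minimal, self-contained sketch of why the extra descriptors pile up. It is illustrative only: struct demo_msg, demo_read_msg() and DEMO_MAX_FDS are invented for the example, while the real reader in librte_vhost fills struct VhostUserMsg with the received FDs and their count. The key point is that every FD passed via SCM_RIGHTS becomes a live descriptor in the slave process, so any FD a request handler neither uses nor closes is leaked, and a master looping on such messages eventually exhausts the process's FD table.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#define DEMO_MAX_FDS 8

struct demo_msg {
	char payload[64];
	int fds[DEMO_MAX_FDS];
	int fd_num;
};

/* Receive one message and collect any FDs sent as ancillary data. */
static int
demo_read_msg(int sockfd, struct demo_msg *msg)
{
	char control[CMSG_SPACE(DEMO_MAX_FDS * sizeof(int))];
	struct iovec iov = {
		.iov_base = msg->payload,
		.iov_len = sizeof(msg->payload),
	};
	struct msghdr mh;
	struct cmsghdr *cmsg;
	int nfds;

	memset(&mh, 0, sizeof(mh));
	mh.msg_iov = &iov;
	mh.msg_iovlen = 1;
	mh.msg_control = control;
	mh.msg_controllen = sizeof(control);

	msg->fd_num = 0;
	if (recvmsg(sockfd, &mh, 0) <= 0)
		return -1;

	/*
	 * Each SCM_RIGHTS control message installs real descriptors in this
	 * process, whether or not the request type expects any.
	 */
	for (cmsg = CMSG_FIRSTHDR(&mh); cmsg != NULL;
			cmsg = CMSG_NXTHDR(&mh, cmsg)) {
		if (cmsg->cmsg_level != SOL_SOCKET ||
				cmsg->cmsg_type != SCM_RIGHTS)
			continue;
		nfds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
		if (nfds > DEMO_MAX_FDS)
			nfds = DEMO_MAX_FDS;
		memcpy(msg->fds, CMSG_DATA(cmsg), nfds * sizeof(int));
		msg->fd_num = nfds;
	}
	return 0;
}

The patch plugs the leak by having every request handler call the new validate_msg_fds() first: if fd_num does not match the number of FDs that request may legitimately carry, all received FDs are closed and the message is rejected before any handler-specific logic runs.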

diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index d4c9cd3..90ecee1 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -92,6 +92,36 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
 static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg);
 static int read_vhost_message(int sockfd, struct VhostUserMsg *msg);
 
+static void
+close_msg_fds(struct VhostUserMsg *msg)
+{
+       int i;
+
+       for (i = 0; i < msg->fd_num; i++)
+               close(msg->fds[i]);
+}
+
+/*
+ * Ensure the expected number of FDs is received,
+ * close all FDs and return an error if this is not the case.
+ */
+static int
+validate_msg_fds(struct VhostUserMsg *msg, int expected_fds)
+{
+       if (msg->fd_num == expected_fds)
+               return 0;
+
+       RTE_LOG(ERR, VHOST_CONFIG,
+               " Expect %d FDs for request %s, received %d\n",
+               expected_fds,
+               vhost_message_str[msg->request.master],
+               msg->fd_num);
+
+       close_msg_fds(msg);
+
+       return -1;
+}
+
 static uint64_t
 get_blk_size(int fd)
 {
@@ -204,18 +234,25 @@ vhost_backend_cleanup(struct virtio_net *dev)
  */
 static int
 vhost_user_set_owner(struct virtio_net **pdev __rte_unused,
-                       struct VhostUserMsg *msg __rte_unused,
+                       struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
 {
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        return RTE_VHOST_MSG_RESULT_OK;
 }
 
 static int
 vhost_user_reset_owner(struct virtio_net **pdev,
-                       struct VhostUserMsg *msg __rte_unused,
+                       struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
 {
        struct virtio_net *dev = *pdev;
+
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        vhost_destroy_device_notify(dev);
 
        cleanup_device(dev, 0);
@@ -233,6 +270,9 @@ vhost_user_get_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct virtio_net *dev = *pdev;
        uint64_t features = 0;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        rte_vhost_driver_get_features(dev->ifname, &features);
 
        msg->payload.u64 = features;
@@ -252,6 +292,9 @@ vhost_user_get_queue_num(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct virtio_net *dev = *pdev;
        uint32_t queue_num = 0;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
 
        msg->payload.u64 = (uint64_t)queue_num;
@@ -274,6 +317,9 @@ vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        rte_vhost_driver_get_features(dev->ifname, &vhost_features);
        if (features & ~vhost_features) {
                RTE_LOG(ERR, VHOST_CONFIG,
@@ -357,6 +403,9 @@ vhost_user_set_vring_num(struct virtio_net **pdev,
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        vq->size = msg->payload.state.num;
 
        /* VIRTIO 1.0, 2.4 Virtqueues says:
@@ -798,6 +847,9 @@ vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct vhost_vring_addr *addr = &msg->payload.addr;
        bool access_ok;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (dev->mem == NULL)
                return RTE_VHOST_MSG_RESULT_ERR;
 
@@ -839,6 +891,9 @@ vhost_user_set_vring_base(struct virtio_net **pdev,
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
        uint64_t val = msg->payload.state.num;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (vq_is_packed(dev)) {
                /*
                 * Bit[0:14]: avail index
@@ -1000,6 +1055,9 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
        int populate;
        int fd;
 
+       if (validate_msg_fds(msg, memory->nregions) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "too many memory regions (%u)\n", memory->nregions);
@@ -1010,8 +1068,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
                RTE_LOG(INFO, VHOST_CONFIG,
                        "(%d) memory regions not changed\n", dev->vid);
 
-               for (i = 0; i < memory->nregions; i++)
-                       close(msg->fds[i]);
+               close_msg_fds(msg);
 
                return RTE_VHOST_MSG_RESULT_OK;
        }
@@ -1154,6 +1211,10 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
                                "Failed to read qemu ack on postcopy set-mem-table\n");
                        goto err_mmap;
                }
+
+               if (validate_msg_fds(&ack_msg, 0) != 0)
+                       goto err_mmap;
+
                if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "Bad qemu ack on postcopy set-mem-table (%d)\n",
@@ -1503,6 +1564,9 @@ vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;
 
+       if (validate_msg_fds(msg, 1) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
@@ -1524,6 +1588,9 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
 {
+       if (validate_msg_fds(msg, 1) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK))
                close(msg->fds[0]);
        RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
@@ -1724,6 +1791,9 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;
 
+       if (validate_msg_fds(msg, 1) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
@@ -1794,6 +1864,9 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
        uint64_t val;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        /* We have to stop the queue (virtio) if it is running. */
        vhost_destroy_device_notify(dev);
 
@@ -1869,6 +1942,9 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        RTE_LOG(INFO, VHOST_CONFIG,
                "set queue enable: %d to qp idx: %d\n",
                enable, index);
@@ -1899,6 +1975,9 @@ vhost_user_get_protocol_features(struct virtio_net **pdev,
        struct virtio_net *dev = *pdev;
        uint64_t features, protocol_features;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        rte_vhost_driver_get_features(dev->ifname, &features);
        rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
 
@@ -1927,6 +2006,9 @@ vhost_user_set_protocol_features(struct virtio_net **pdev,
        uint64_t protocol_features = msg->payload.u64;
        uint64_t slave_protocol_features = 0;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        rte_vhost_driver_get_protocol_features(dev->ifname,
                        &slave_protocol_features);
        if (protocol_features & ~slave_protocol_features) {
@@ -1953,6 +2035,9 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
        uint64_t size, off;
        void *addr;
 
+       if (validate_msg_fds(msg, 1) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
                return RTE_VHOST_MSG_RESULT_ERR;
@@ -2016,6 +2101,9 @@ static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
 {
+       if (validate_msg_fds(msg, 1) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        close(msg->fds[0]);
        RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");
 
@@ -2039,6 +2127,9 @@ vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        RTE_LOG(DEBUG, VHOST_CONFIG,
                ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
@@ -2066,6 +2157,10 @@ vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
 {
        struct virtio_net *dev = *pdev;
+
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (msg->payload.u64 < VIRTIO_MIN_MTU ||
                        msg->payload.u64 > VIRTIO_MAX_MTU) {
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
@@ -2086,6 +2181,9 @@ vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct virtio_net *dev = *pdev;
        int fd = msg->fds[0];
 
+       if (validate_msg_fds(msg, 1) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Invalid file descriptor for slave channel (%d)\n",
@@ -2171,6 +2269,9 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
        uint16_t i;
        uint64_t vva, len;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        switch (imsg->type) {
        case VHOST_IOTLB_UPDATE:
                len = imsg->size;
@@ -2217,6 +2318,9 @@ vhost_user_set_postcopy_advise(struct virtio_net **pdev,
 #ifdef RTE_LIBRTE_VHOST_POSTCOPY
        struct uffdio_api api_struct;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 
        if (dev->postcopy_ufd == -1) {
@@ -2252,6 +2356,9 @@ vhost_user_set_postcopy_listen(struct virtio_net **pdev,
 {
        struct virtio_net *dev = *pdev;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (dev->mem && dev->mem->nregions) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Regions already registered at postcopy-listen\n");
@@ -2268,6 +2375,9 @@ vhost_user_postcopy_end(struct virtio_net **pdev, struct VhostUserMsg *msg,
 {
        struct virtio_net *dev = *pdev;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        dev->postcopy_listening = 0;
        if (dev->postcopy_ufd >= 0) {
                close(dev->postcopy_ufd);
@@ -2621,6 +2731,7 @@ skip_to_post_handle:
        if (!handled) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "vhost message (req: %d) was not handled.\n", request);
+               close_msg_fds(&msg);
                ret = RTE_VHOST_MSG_RESULT_ERR;
        }