net/virtio: add virtio-user memory tables ops
author     Maxime Coquelin <maxime.coquelin@redhat.com>
           Tue, 26 Jan 2021 10:16:24 +0000 (11:16 +0100)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Fri, 29 Jan 2021 17:16:09 +0000 (18:16 +0100)
This patch implements a dedicated callback for preparing and sending
the memory table to the backends. The vhost-user, vhost-kernel and
vhost-vDPA backends now handle VHOST_USER_SET_MEM_TABLE through this
callback instead of the generic send_request() path.
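
In practice the change adds one entry to the backend ops table and
replaces the generic VHOST_USER_SET_MEM_TABLE request with a direct
call from virtio_user_dev.c; a condensed view of the new interface,
taken from the diff below:

    struct virtio_user_backend_ops {
            /* other callbacks: set_owner, get/set_features, ... */
            int (*set_memory_table)(struct virtio_user_dev *dev);
            int (*send_request)(struct virtio_user_dev *dev,
                                enum vhost_user_request req,
                                void *arg);
    };

    /* e.g. in virtio_user_start_device(): */
    ret = dev->ops->set_memory_table(dev);
    if (ret < 0)
            goto error;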

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
drivers/net/virtio/virtio_user/vhost.h
drivers/net/virtio/virtio_user/vhost_kernel.c
drivers/net/virtio/virtio_user/vhost_user.c
drivers/net/virtio/virtio_user/vhost_vdpa.c
drivers/net/virtio/virtio_user/virtio_user_dev.c

diff --git a/drivers/net/virtio/virtio_user/vhost.h b/drivers/net/virtio/virtio_user/vhost.h
index d805526..463801f 100644
@@ -114,6 +114,7 @@ struct virtio_user_backend_ops {
        int (*set_features)(struct virtio_user_dev *dev, uint64_t features);
        int (*get_protocol_features)(struct virtio_user_dev *dev, uint64_t *features);
        int (*set_protocol_features)(struct virtio_user_dev *dev, uint64_t features);
+       int (*set_memory_table)(struct virtio_user_dev *dev);
        int (*send_request)(struct virtio_user_dev *dev,
                            enum vhost_user_request req,
                            void *arg);
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c
index e46039e..69f932b 100644
@@ -148,17 +148,6 @@ vhost_kernel_set_features(struct virtio_user_dev *dev, uint64_t features)
        return vhost_kernel_ioctl(dev->vhostfds[0], VHOST_SET_FEATURES, &features);
 }
 
-static uint64_t vhost_req_user_to_kernel[] = {
-       [VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
-       [VHOST_USER_SET_VRING_CALL] = VHOST_SET_VRING_CALL,
-       [VHOST_USER_SET_VRING_NUM] = VHOST_SET_VRING_NUM,
-       [VHOST_USER_SET_VRING_BASE] = VHOST_SET_VRING_BASE,
-       [VHOST_USER_GET_VRING_BASE] = VHOST_GET_VRING_BASE,
-       [VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR,
-       [VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK,
-       [VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
-};
-
 static int
 add_memseg_list(const struct rte_memseg_list *msl, void *arg)
 {
@@ -193,16 +182,17 @@ add_memseg_list(const struct rte_memseg_list *msl, void *arg)
  * have much more memory regions. Below function will treat each
  * contiguous memory space reserved by DPDK as one region.
  */
-static struct vhost_memory_kernel *
-prepare_vhost_memory_kernel(void)
+static int
+vhost_kernel_set_memory_table(struct virtio_user_dev *dev)
 {
        struct vhost_memory_kernel *vm;
+       int ret;
 
        vm = malloc(sizeof(struct vhost_memory_kernel) +
                        max_regions *
                        sizeof(struct vhost_memory_region));
        if (!vm)
-               return NULL;
+               goto err;
 
        vm->nregions = 0;
        vm->padding = 0;
@@ -211,14 +201,34 @@ prepare_vhost_memory_kernel(void)
         * The memory lock has already been taken by memory subsystem
         * or virtio_user_start_device().
         */
-       if (rte_memseg_list_walk_thread_unsafe(add_memseg_list, vm) < 0) {
-               free(vm);
-               return NULL;
-       }
+       ret = rte_memseg_list_walk_thread_unsafe(add_memseg_list, vm);
+       if (ret < 0)
+               goto err_free;
+
+       ret = vhost_kernel_ioctl(dev->vhostfds[0], VHOST_SET_MEM_TABLE, vm);
+       if (ret < 0)
+               goto err_free;
 
-       return vm;
+       free(vm);
+
+       return 0;
+err_free:
+       free(vm);
+err:
+       PMD_DRV_LOG(ERR, "Failed to set memory table");
+       return -1;
 }
 
+static uint64_t vhost_req_user_to_kernel[] = {
+       [VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
+       [VHOST_USER_SET_VRING_CALL] = VHOST_SET_VRING_CALL,
+       [VHOST_USER_SET_VRING_NUM] = VHOST_SET_VRING_NUM,
+       [VHOST_USER_SET_VRING_BASE] = VHOST_SET_VRING_BASE,
+       [VHOST_USER_GET_VRING_BASE] = VHOST_GET_VRING_BASE,
+       [VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR,
+       [VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK,
+};
+
 static int
 vhost_kernel_send_request(struct virtio_user_dev *dev,
                   enum vhost_user_request req,
@@ -227,7 +237,6 @@ vhost_kernel_send_request(struct virtio_user_dev *dev,
        int ret = -1;
        unsigned int i;
        uint64_t req_kernel;
-       struct vhost_memory_kernel *vm = NULL;
        int vhostfd;
        unsigned int queue_sel;
 
@@ -235,13 +244,6 @@ vhost_kernel_send_request(struct virtio_user_dev *dev,
 
        req_kernel = vhost_req_user_to_kernel[req];
 
-       if (req_kernel == VHOST_SET_MEM_TABLE) {
-               vm = prepare_vhost_memory_kernel();
-               if (!vm)
-                       return -1;
-               arg = (void *)vm;
-       }
-
        switch (req_kernel) {
        case VHOST_SET_VRING_NUM:
        case VHOST_SET_VRING_ADDR:
@@ -271,9 +273,6 @@ vhost_kernel_send_request(struct virtio_user_dev *dev,
                ret = ioctl(vhostfd, req_kernel, arg);
        }
 
-       if (vm)
-               free(vm);
-
        if (ret < 0)
                PMD_DRV_LOG(ERR, "%s failed: %s",
                            vhost_msg_strings[req], strerror(errno));
@@ -403,6 +402,7 @@ struct virtio_user_backend_ops virtio_ops_kernel = {
        .set_owner = vhost_kernel_set_owner,
        .get_features = vhost_kernel_get_features,
        .set_features = vhost_kernel_set_features,
+       .set_memory_table = vhost_kernel_set_memory_table,
        .send_request = vhost_kernel_send_request,
        .enable_qp = vhost_kernel_enable_queue_pair
 };
diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c
index 4877574..84765d5 100644
@@ -81,6 +81,9 @@ vhost_user_write(int fd, struct vhost_user_msg *msg, int *fds, int fd_num)
                r = sendmsg(fd, &msgh, 0);
        } while (r < 0 && errno == EINTR);
 
+       if (r < 0)
+               PMD_DRV_LOG(ERR, "Failed to send msg: %s", strerror(errno));
+
        return r;
 }
 
@@ -125,6 +128,39 @@ fail:
        return -1;
 }
 
+static int
+vhost_user_check_reply_ack(struct virtio_user_dev *dev, struct vhost_user_msg *msg)
+{
+       enum vhost_user_request req = msg->request;
+       int ret;
+
+       if (!(msg->flags & VHOST_USER_NEED_REPLY_MASK))
+               return 0;
+
+       ret = vhost_user_read(dev->vhostfd, msg);
+       if (ret < 0) {
+               PMD_DRV_LOG(ERR, "Failed to read reply-ack");
+               return -1;
+       }
+
+       if (req != msg->request) {
+               PMD_DRV_LOG(ERR, "Unexpected reply-ack request type (%d)", msg->request);
+               return -1;
+       }
+
+       if (msg->size != sizeof(msg->payload.u64)) {
+               PMD_DRV_LOG(ERR, "Unexpected reply-ack payload size (%u)", msg->size);
+               return -1;
+       }
+
+       if (msg->payload.u64) {
+               PMD_DRV_LOG(ERR, "Slave replied NACK to request type (%d)", msg->request);
+               return -1;
+       }
+
+       return 0;
+}
+
 static int
 vhost_user_set_owner(struct virtio_user_dev *dev)
 {
@@ -338,25 +374,47 @@ update_memory_region(const struct rte_memseg_list *msl __rte_unused,
 }
 
 static int
-prepare_vhost_memory_user(struct vhost_user_msg *msg, int fds[])
+vhost_user_set_memory_table(struct virtio_user_dev *dev)
 {
        struct walk_arg wa;
+       int fds[VHOST_MEMORY_MAX_NREGIONS];
+       int ret, fd_num;
+       struct vhost_user_msg msg = {
+               .request = VHOST_USER_SET_MEM_TABLE,
+               .flags = VHOST_USER_VERSION,
+       };
+
+       if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK))
+               msg.flags |= VHOST_USER_NEED_REPLY_MASK;
 
        wa.region_nr = 0;
-       wa.vm = &msg->payload.memory;
+       wa.vm = &msg.payload.memory;
        wa.fds = fds;
 
        /*
         * The memory lock has already been taken by memory subsystem
         * or virtio_user_start_device().
         */
-       if (rte_memseg_walk_thread_unsafe(update_memory_region, &wa) < 0)
-               return -1;
+       ret = rte_memseg_walk_thread_unsafe(update_memory_region, &wa);
+       if (ret < 0)
+               goto err;
 
-       msg->payload.memory.nregions = wa.region_nr;
-       msg->payload.memory.padding = 0;
+       fd_num = wa.region_nr;
+       msg.payload.memory.nregions = wa.region_nr;
+       msg.payload.memory.padding = 0;
 
-       return 0;
+       msg.size = sizeof(msg.payload.memory.nregions);
+       msg.size += sizeof(msg.payload.memory.padding);
+       msg.size += fd_num * sizeof(struct vhost_memory_region);
+
+       ret = vhost_user_write(dev->vhostfd, &msg, fds, fd_num);
+       if (ret < 0)
+               goto err;
+
+       return vhost_user_check_reply_ack(dev, &msg);
+err:
+       PMD_DRV_LOG(ERR, "Failed to set memory table");
+       return -1;
 }
 
 static struct vhost_user_msg m;
@@ -369,7 +427,6 @@ const char * const vhost_msg_strings[] = {
        [VHOST_USER_GET_VRING_BASE] = "VHOST_GET_VRING_BASE",
        [VHOST_USER_SET_VRING_ADDR] = "VHOST_SET_VRING_ADDR",
        [VHOST_USER_SET_VRING_KICK] = "VHOST_SET_VRING_KICK",
-       [VHOST_USER_SET_MEM_TABLE] = "VHOST_SET_MEM_TABLE",
        [VHOST_USER_SET_VRING_ENABLE] = "VHOST_SET_VRING_ENABLE",
        [VHOST_USER_SET_STATUS] = "VHOST_SET_STATUS",
        [VHOST_USER_GET_STATUS] = "VHOST_GET_STATUS",
@@ -434,18 +491,6 @@ vhost_user_sock(struct virtio_user_dev *dev,
        case VHOST_USER_RESET_OWNER:
                break;
 
-       case VHOST_USER_SET_MEM_TABLE:
-               if (prepare_vhost_memory_user(&msg, fds) < 0)
-                       return -1;
-               fd_num = msg.payload.memory.nregions;
-               msg.size = sizeof(m.payload.memory.nregions);
-               msg.size += sizeof(m.payload.memory.padding);
-               msg.size += fd_num * sizeof(struct vhost_memory_region);
-
-               if (has_reply_ack)
-                       msg.flags |= VHOST_USER_NEED_REPLY_MASK;
-               break;
-
        case VHOST_USER_SET_LOG_FD:
                fds[fd_num++] = *((int *)arg);
                break;
@@ -644,6 +689,7 @@ struct virtio_user_backend_ops virtio_ops_user = {
        .set_features = vhost_user_set_features,
        .get_protocol_features = vhost_user_get_protocol_features,
        .set_protocol_features = vhost_user_set_protocol_features,
+       .set_memory_table = vhost_user_set_memory_table,
        .send_request = vhost_user_sock,
        .enable_qp = vhost_user_enable_queue_pair
 };
diff --git a/drivers/net/virtio/virtio_user/vhost_vdpa.c b/drivers/net/virtio/virtio_user/vhost_vdpa.c
index f627afb..bd7daeb 100644
@@ -19,7 +19,6 @@
 #define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
 #define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
 #define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
-#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, void *)
 #define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
 #define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
 #define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
@@ -46,7 +45,6 @@ static uint64_t vhost_req_user_to_vdpa[] = {
        [VHOST_USER_GET_VRING_BASE] = VHOST_GET_VRING_BASE,
        [VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR,
        [VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK,
-       [VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
        [VHOST_USER_SET_STATUS] = VHOST_VDPA_SET_STATUS,
        [VHOST_USER_GET_STATUS] = VHOST_VDPA_GET_STATUS,
        [VHOST_USER_SET_VRING_ENABLE] = VHOST_VDPA_SET_VRING_ENABLE,
@@ -319,7 +317,7 @@ vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
 }
 
 static int
-vhost_vdpa_dma_map_all(struct virtio_user_dev *dev)
+vhost_vdpa_set_memory_table(struct virtio_user_dev *dev)
 {
        int ret;
 
@@ -376,9 +374,6 @@ vhost_vdpa_send_request(struct virtio_user_dev *dev,
 
        req_vdpa = vhost_req_user_to_vdpa[req];
 
-       if (req_vdpa == VHOST_SET_MEM_TABLE)
-               return vhost_vdpa_dma_map_all(dev);
-
        switch (req_vdpa) {
        case VHOST_SET_VRING_NUM:
        case VHOST_SET_VRING_ADDR:
@@ -461,6 +456,7 @@ struct virtio_user_backend_ops virtio_ops_vdpa = {
        .set_features = vhost_vdpa_set_features,
        .get_protocol_features = vhost_vdpa_get_backend_features,
        .set_protocol_features = vhost_vdpa_set_backend_features,
+       .set_memory_table = vhost_vdpa_set_memory_table,
        .send_request = vhost_vdpa_send_request,
        .enable_qp = vhost_vdpa_enable_queue_pair,
        .dma_map = vhost_vdpa_dma_map_batch,
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 45417bb..cb8f8a8 100644
@@ -177,7 +177,7 @@ virtio_user_start_device(struct virtio_user_dev *dev)
                goto error;
 
        /* Step 2: share memory regions */
-       ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
+       ret = dev->ops->set_memory_table(dev);
        if (ret < 0)
                goto error;
 
@@ -352,7 +352,7 @@ virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
                dev->ops->enable_qp(dev, i, 0);
 
        /* Step 2: update memory regions */
-       dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
+       dev->ops->set_memory_table(dev);
 
        /* Step 3: resume the active queues */
        for (i = 0; i < dev->queue_pairs; i++)