int (*set_features)(struct virtio_user_dev *dev, uint64_t features);
int (*get_protocol_features)(struct virtio_user_dev *dev, uint64_t *features);
int (*set_protocol_features)(struct virtio_user_dev *dev, uint64_t features);
+ int (*set_memory_table)(struct virtio_user_dev *dev);
int (*send_request)(struct virtio_user_dev *dev,
enum vhost_user_request req,
void *arg);
return vhost_kernel_ioctl(dev->vhostfds[0], VHOST_SET_FEATURES, &features);
}
-static uint64_t vhost_req_user_to_kernel[] = {
- [VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
- [VHOST_USER_SET_VRING_CALL] = VHOST_SET_VRING_CALL,
- [VHOST_USER_SET_VRING_NUM] = VHOST_SET_VRING_NUM,
- [VHOST_USER_SET_VRING_BASE] = VHOST_SET_VRING_BASE,
- [VHOST_USER_GET_VRING_BASE] = VHOST_GET_VRING_BASE,
- [VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR,
- [VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK,
- [VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
-};
-
static int
add_memseg_list(const struct rte_memseg_list *msl, void *arg)
{
* have much more memory regions. Below function will treat each
* contiguous memory space reserved by DPDK as one region.
*/
-static struct vhost_memory_kernel *
-prepare_vhost_memory_kernel(void)
+static int
+vhost_kernel_set_memory_table(struct virtio_user_dev *dev)
{
struct vhost_memory_kernel *vm;
+ int ret;
vm = malloc(sizeof(struct vhost_memory_kernel) +
max_regions *
sizeof(struct vhost_memory_region));
if (!vm)
- return NULL;
+ goto err;
vm->nregions = 0;
vm->padding = 0;
* The memory lock has already been taken by memory subsystem
* or virtio_user_start_device().
*/
- if (rte_memseg_list_walk_thread_unsafe(add_memseg_list, vm) < 0) {
- free(vm);
- return NULL;
- }
+ ret = rte_memseg_list_walk_thread_unsafe(add_memseg_list, vm);
+ if (ret < 0)
+ goto err_free;
+
- return vm;
+ ret = vhost_kernel_ioctl(dev->vhostfds[0], VHOST_SET_MEM_TABLE, vm);
+ if (ret < 0)
+ goto err_free;
+
+ free(vm);
+
+ return 0;
+err_free:
+ free(vm);
+err:
+ PMD_DRV_LOG(ERR, "Failed to set memory table");
+ return -1;
}
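
For reference, add_memseg_list() — the walk callback used above — is untouched by this patch and not shown in the excerpt. A minimal sketch of such a callback, assuming it coalesces each DPDK memseg list into one vhost region (rte_memseg_list field names from rte_memory.h; illustrative, not the exact upstream body):

static int
add_memseg_list(const struct rte_memseg_list *msl, void *arg)
{
	struct vhost_memory_kernel *vm = arg;
	struct vhost_memory_region *mr;

	if (msl->external)
		return 0;

	if (vm->nregions >= max_regions)
		return -1;

	/* One region per memseg list: VA == "guest" PA for virtio_user. */
	mr = &vm->regions[vm->nregions++];
	mr->guest_phys_addr = (uint64_t)(uintptr_t)msl->base_va;
	mr->userspace_addr = (uint64_t)(uintptr_t)msl->base_va;
	mr->memory_size = (uint64_t)msl->page_sz * msl->memseg_arr.len;
	mr->mmap_offset = 0;

	return 0;
}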
+static uint64_t vhost_req_user_to_kernel[] = {
+ [VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
+ [VHOST_USER_SET_VRING_CALL] = VHOST_SET_VRING_CALL,
+ [VHOST_USER_SET_VRING_NUM] = VHOST_SET_VRING_NUM,
+ [VHOST_USER_SET_VRING_BASE] = VHOST_SET_VRING_BASE,
+ [VHOST_USER_GET_VRING_BASE] = VHOST_GET_VRING_BASE,
+ [VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR,
+ [VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK,
+};
+
static int
vhost_kernel_send_request(struct virtio_user_dev *dev,
enum vhost_user_request req,
int ret = -1;
unsigned int i;
uint64_t req_kernel;
- struct vhost_memory_kernel *vm = NULL;
int vhostfd;
unsigned int queue_sel;
req_kernel = vhost_req_user_to_kernel[req];
- if (req_kernel == VHOST_SET_MEM_TABLE) {
- vm = prepare_vhost_memory_kernel();
- if (!vm)
- return -1;
- arg = (void *)vm;
- }
-
switch (req_kernel) {
case VHOST_SET_VRING_NUM:
case VHOST_SET_VRING_ADDR:
ret = ioctl(vhostfd, req_kernel, arg);
}
- if (vm)
- free(vm);
-
if (ret < 0)
PMD_DRV_LOG(ERR, "%s failed: %s",
vhost_msg_strings[req], strerror(errno));
.set_owner = vhost_kernel_set_owner,
.get_features = vhost_kernel_get_features,
.set_features = vhost_kernel_set_features,
+ .set_memory_table = vhost_kernel_set_memory_table,
.send_request = vhost_kernel_send_request,
.enable_qp = vhost_kernel_enable_queue_pair
};
r = sendmsg(fd, &msgh, 0);
} while (r < 0 && errno == EINTR);
+ if (r < 0)
+ PMD_DRV_LOG(ERR, "Failed to send msg: %s", strerror(errno));
+
return r;
}
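
This hunk is the tail of vhost_user_write(), which the new vhost_user_set_memory_table() below relies on to send the message with one file descriptor per memory region. For context, a sketch of how such a helper attaches the descriptors as SCM_RIGHTS ancillary data (illustrative; the full upstream body is not part of this diff, it needs <sys/socket.h>, <string.h> and <errno.h>, and VHOST_USER_HDR_SIZE stands for the fixed header that precedes the payload):

static int
vhost_user_write(int fd, struct vhost_user_msg *msg, int *fds, int fd_num)
{
	struct msghdr msgh;
	struct iovec iov;
	char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))];
	struct cmsghdr *cmsg;
	int r;

	memset(&msgh, 0, sizeof(msgh));
	memset(control, 0, sizeof(control));

	iov.iov_base = (char *)msg;
	iov.iov_len = VHOST_USER_HDR_SIZE + msg->size;
	msgh.msg_iov = &iov;
	msgh.msg_iovlen = 1;

	if (fd_num > 0) {
		/* File descriptors travel out-of-band as ancillary data. */
		msgh.msg_control = control;
		msgh.msg_controllen = sizeof(control);
		cmsg = CMSG_FIRSTHDR(&msgh);
		cmsg->cmsg_len = CMSG_LEN(fd_num * sizeof(int));
		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_RIGHTS;
		memcpy(CMSG_DATA(cmsg), fds, fd_num * sizeof(int));
	}

	do {
		r = sendmsg(fd, &msgh, 0);
	} while (r < 0 && errno == EINTR);

	if (r < 0)
		PMD_DRV_LOG(ERR, "Failed to send msg: %s", strerror(errno));

	return r;
}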
return -1;
}
+static int
+vhost_user_check_reply_ack(struct virtio_user_dev *dev, struct vhost_user_msg *msg)
+{
+ enum vhost_user_request req = msg->request;
+ int ret;
+
+ if (!(msg->flags & VHOST_USER_NEED_REPLY_MASK))
+ return 0;
+
+ ret = vhost_user_read(dev->vhostfd, msg);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to read reply-ack");
+ return -1;
+ }
+
+ if (req != msg->request) {
+ PMD_DRV_LOG(ERR, "Unexpected reply-ack request type (%d)", msg->request);
+ return -1;
+ }
+
+ if (msg->size != sizeof(msg->payload.u64)) {
+ PMD_DRV_LOG(ERR, "Unexpected reply-ack payload size (%u)", msg->size);
+ return -1;
+ }
+
+ if (msg->payload.u64) {
+ PMD_DRV_LOG(ERR, "Slave replied NACK to request type (%d)", msg->request);
+ return -1;
+ }
+
+ return 0;
+}
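
The check is a no-op unless the request carried VHOST_USER_NEED_REPLY_MASK, so callers can apply it unconditionally after a write. A hypothetical helper showing the intended pairing (vhost_user_send_with_ack() is not part of the patch):

static int
vhost_user_send_with_ack(struct virtio_user_dev *dev, struct vhost_user_msg *msg)
{
	/* Only ask for an ack when the slave negotiated REPLY_ACK. */
	if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK))
		msg->flags |= VHOST_USER_NEED_REPLY_MASK;

	if (vhost_user_write(dev->vhostfd, msg, NULL, 0) < 0)
		return -1;

	return vhost_user_check_reply_ack(dev, msg);
}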
+
static int
vhost_user_set_owner(struct virtio_user_dev *dev)
{
}
static int
-prepare_vhost_memory_user(struct vhost_user_msg *msg, int fds[])
+vhost_user_set_memory_table(struct virtio_user_dev *dev)
{
struct walk_arg wa;
+ int fds[VHOST_MEMORY_MAX_NREGIONS];
+ int ret, fd_num;
+ struct vhost_user_msg msg = {
+ .request = VHOST_USER_SET_MEM_TABLE,
+ .flags = VHOST_USER_VERSION,
+ };
+
+ if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK))
+ msg.flags |= VHOST_USER_NEED_REPLY_MASK;
wa.region_nr = 0;
- wa.vm = &msg->payload.memory;
+ wa.vm = &msg.payload.memory;
wa.fds = fds;
/*
* The memory lock has already been taken by memory subsystem
* or virtio_user_start_device().
*/
- if (rte_memseg_walk_thread_unsafe(update_memory_region, &wa) < 0)
- return -1;
+ ret = rte_memseg_walk_thread_unsafe(update_memory_region, &wa);
+ if (ret < 0)
+ goto err;
- msg->payload.memory.nregions = wa.region_nr;
- msg->payload.memory.padding = 0;
+ fd_num = wa.region_nr;
+ msg.payload.memory.nregions = wa.region_nr;
+ msg.payload.memory.padding = 0;
- return 0;
+ msg.size = sizeof(msg.payload.memory.nregions);
+ msg.size += sizeof(msg.payload.memory.padding);
+ msg.size += fd_num * sizeof(struct vhost_memory_region);
+
+ ret = vhost_user_write(dev->vhostfd, &msg, fds, fd_num);
+ if (ret < 0)
+ goto err;
+
+ return vhost_user_check_reply_ack(dev, &msg);
+err:
+ PMD_DRV_LOG(ERR, "Failed to set memory table");
+ return -1;
}
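
Note the msg.size computation above: only the used prefix of the memory payload goes on the wire, i.e. the two 32-bit header fields plus one region descriptor per fd actually passed. For reference, the payload layout this assumes, as defined by the vhost-user protocol:

struct vhost_memory_region {
	uint64_t guest_phys_addr;	/* region start in guest address space */
	uint64_t memory_size;		/* region length */
	uint64_t userspace_addr;	/* region start in this process */
	uint64_t mmap_offset;		/* offset within the mmap'ed fd */
};

struct vhost_memory {
	uint32_t nregions;
	uint32_t padding;
	struct vhost_memory_region regions[VHOST_MEMORY_MAX_NREGIONS];
};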
static struct vhost_user_msg m;
[VHOST_USER_GET_VRING_BASE] = "VHOST_GET_VRING_BASE",
[VHOST_USER_SET_VRING_ADDR] = "VHOST_SET_VRING_ADDR",
[VHOST_USER_SET_VRING_KICK] = "VHOST_SET_VRING_KICK",
- [VHOST_USER_SET_MEM_TABLE] = "VHOST_SET_MEM_TABLE",
[VHOST_USER_SET_VRING_ENABLE] = "VHOST_SET_VRING_ENABLE",
[VHOST_USER_SET_STATUS] = "VHOST_SET_STATUS",
[VHOST_USER_GET_STATUS] = "VHOST_GET_STATUS",
case VHOST_USER_RESET_OWNER:
break;
- case VHOST_USER_SET_MEM_TABLE:
- if (prepare_vhost_memory_user(&msg, fds) < 0)
- return -1;
- fd_num = msg.payload.memory.nregions;
- msg.size = sizeof(m.payload.memory.nregions);
- msg.size += sizeof(m.payload.memory.padding);
- msg.size += fd_num * sizeof(struct vhost_memory_region);
-
- if (has_reply_ack)
- msg.flags |= VHOST_USER_NEED_REPLY_MASK;
- break;
-
case VHOST_USER_SET_LOG_FD:
fds[fd_num++] = *((int *)arg);
break;
.set_features = vhost_user_set_features,
.get_protocol_features = vhost_user_get_protocol_features,
.set_protocol_features = vhost_user_set_protocol_features,
+ .set_memory_table = vhost_user_set_memory_table,
.send_request = vhost_user_sock,
.enable_qp = vhost_user_enable_queue_pair
};
#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, void *)
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
[VHOST_USER_GET_VRING_BASE] = VHOST_GET_VRING_BASE,
[VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR,
[VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK,
- [VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
[VHOST_USER_SET_STATUS] = VHOST_VDPA_SET_STATUS,
[VHOST_USER_GET_STATUS] = VHOST_VDPA_GET_STATUS,
[VHOST_USER_SET_VRING_ENABLE] = VHOST_VDPA_SET_VRING_ENABLE,
}
static int
-vhost_vdpa_dma_map_all(struct virtio_user_dev *dev)
+vhost_vdpa_set_memory_table(struct virtio_user_dev *dev)
{
int ret;
req_vdpa = vhost_req_user_to_vdpa[req];
- if (req_vdpa == VHOST_SET_MEM_TABLE)
- return vhost_vdpa_dma_map_all(dev);
-
switch (req_vdpa) {
case VHOST_SET_VRING_NUM:
case VHOST_SET_VRING_ADDR:
.set_features = vhost_vdpa_set_features,
.get_protocol_features = vhost_vdpa_get_backend_features,
.set_protocol_features = vhost_vdpa_set_backend_features,
+ .set_memory_table = vhost_vdpa_set_memory_table,
.send_request = vhost_vdpa_send_request,
.enable_qp = vhost_vdpa_enable_queue_pair,
.dma_map = vhost_vdpa_dma_map_batch,
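
Unlike the two backends above, vhost-vDPA has no memory-table message: the renamed vhost_vdpa_set_memory_table() (formerly vhost_vdpa_dma_map_all(); its body is unchanged by this patch) installs IOTLB mappings for all DPDK memory instead. A rough sketch of that shape, assuming the file's existing vhost_vdpa_map() walk callback and vhost_vdpa_dma_unmap() helper (illustrative, not the exact upstream body):

static int
vhost_vdpa_set_memory_table(struct virtio_user_dev *dev)
{
	int ret;

	/* Drop any stale mappings before re-walking memory. */
	vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);

	/* Install one IOTLB mapping per memory segment. */
	ret = rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);
	if (ret < 0)
		vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);

	return ret;
}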
goto error;
/* Step 2: share memory regions */
- ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
+ ret = dev->ops->set_memory_table(dev);
if (ret < 0)
goto error;
dev->ops->enable_qp(dev, i, 0);
/* Step 2: update memory regions */
- dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
+ dev->ops->set_memory_table(dev);
/* Step 3: resume the active queues */
for (i = 0; i < dev->queue_pairs; i++)
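
The excerpt ends mid-loop; by symmetry with the pause step above, the resume loop body re-enables each queue pair (context unchanged by this patch):

	dev->ops->enable_qp(dev, i, 1);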