#define INFLIGHT_ALIGNMENT 64
#define INFLIGHT_VERSION 0x1
-static const char *vhost_message_str[VHOST_USER_MAX] = {
- [VHOST_USER_NONE] = "VHOST_USER_NONE",
- [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
- [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
- [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
- [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
- [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
- [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
- [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
- [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
- [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
- [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
- [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
- [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
- [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
- [VHOST_USER_SET_VRING_ERR] = "VHOST_USER_SET_VRING_ERR",
- [VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES",
- [VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES",
- [VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM",
- [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
- [VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP",
- [VHOST_USER_NET_SET_MTU] = "VHOST_USER_NET_SET_MTU",
- [VHOST_USER_SET_SLAVE_REQ_FD] = "VHOST_USER_SET_SLAVE_REQ_FD",
- [VHOST_USER_IOTLB_MSG] = "VHOST_USER_IOTLB_MSG",
- [VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS",
- [VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
- [VHOST_USER_POSTCOPY_ADVISE] = "VHOST_USER_POSTCOPY_ADVISE",
- [VHOST_USER_POSTCOPY_LISTEN] = "VHOST_USER_POSTCOPY_LISTEN",
- [VHOST_USER_POSTCOPY_END] = "VHOST_USER_POSTCOPY_END",
- [VHOST_USER_GET_INFLIGHT_FD] = "VHOST_USER_GET_INFLIGHT_FD",
- [VHOST_USER_SET_INFLIGHT_FD] = "VHOST_USER_SET_INFLIGHT_FD",
- [VHOST_USER_SET_STATUS] = "VHOST_USER_SET_STATUS",
- [VHOST_USER_GET_STATUS] = "VHOST_USER_GET_STATUS",
-};
+typedef struct vhost_message_handler {
+ const char *description;
+ int (*callback)(struct virtio_net **pdev, struct vhu_msg_context *ctx,
+ int main_fd);
+ bool accepts_fd;
+} vhost_message_handler_t;
+static vhost_message_handler_t vhost_message_handlers[];
static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
static int read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
VHOST_LOG_CONFIG(ERR, "(%s) expect %d FDs for request %s, received %d\n",
dev->ifname, expected_fds,
- vhost_message_str[ctx->msg.request.master],
+ vhost_message_handlers[ctx->msg.request.master].description,
ctx->fd_num);
close_msg_fds(ctx);
return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}
-static int
-async_dma_map(struct virtio_net *dev, struct rte_vhost_mem_region *region, bool do_map)
+static void
+async_dma_map(struct virtio_net *dev, bool do_map)
{
- uint64_t host_iova;
int ret = 0;
+ uint32_t i;
+ struct guest_page *page;
- host_iova = rte_mem_virt2iova((void *)(uintptr_t)region->host_user_addr);
if (do_map) {
- /* Add mapped region into the default container of DPDK. */
- ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
- region->host_user_addr,
- host_iova,
- region->size);
- if (ret) {
- /*
- * DMA device may bind with kernel driver, in this case,
- * we don't need to program IOMMU manually. However, if no
- * device is bound with vfio/uio in DPDK, and vfio kernel
- * module is loaded, the API will still be called and return
- * with ENODEV/ENOSUP.
- *
- * DPDK vfio only returns ENODEV/ENOSUP in very similar
- * situations(vfio either unsupported, or supported
- * but no devices found). Either way, no mappings could be
- * performed. We treat it as normal case in async path.
- */
- if (rte_errno == ENODEV || rte_errno == ENOTSUP)
- return 0;
-
- VHOST_LOG_CONFIG(ERR, "(%s) DMA engine map failed\n", dev->ifname);
- /* DMA mapping errors won't stop VHST_USER_SET_MEM_TABLE. */
- return 0;
+ for (i = 0; i < dev->nr_guest_pages; i++) {
+ page = &dev->guest_pages[i];
+ ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
+ page->host_user_addr,
+ page->host_iova,
+ page->size);
+ if (ret) {
+ /*
+ * The DMA device may be bound to a kernel driver, in which case
+ * we don't need to program the IOMMU manually. However, if no
+ * device is bound with vfio/uio in DPDK, and the vfio kernel
+ * module is loaded, the API will still be called and return
+ * ENODEV.
+ *
+ * DPDK vfio only returns ENODEV in very similar situations
+ * (vfio either unsupported, or supported but no devices found).
+ * Either way, no mapping could be performed, so treat it as a
+ * normal case in the async path. This is a workaround.
+ */
+ if (rte_errno == ENODEV)
+ return;
+
+ /* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
+ VHOST_LOG_CONFIG(ERR, "DMA engine map failed\n");
+ }
}
} else {
- /* Remove mapped region from the default container of DPDK. */
- ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
- region->host_user_addr,
- host_iova,
- region->size);
- if (ret) {
- /* like DMA map, ignore the kernel driver case when unmap. */
- if (rte_errno == EINVAL)
- return 0;
-
- VHOST_LOG_CONFIG(ERR, "(%s) DMA engine unmap failed\n", dev->ifname);
- return ret;
+ for (i = 0; i < dev->nr_guest_pages; i++) {
+ page = &dev->guest_pages[i];
+ ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
+ page->host_user_addr,
+ page->host_iova,
+ page->size);
+ if (ret) {
+ /* As with DMA map, ignore the kernel-driver case when unmapping. */
+ if (rte_errno == EINVAL)
+ return;
+
+ VHOST_LOG_CONFIG(ERR, "DMA engine unmap failed\n");
+ }
}
}
-
- return ret;
}
static void
if (!dev || !dev->mem)
return;
+ if (dev->async_copy && rte_vfio_is_enabled("vfio"))
+ async_dma_map(dev, false);
+
for (i = 0; i < dev->mem->nregions; i++) {
reg = &dev->mem->regions[i];
if (reg->host_user_addr) {
- if (dev->async_copy && rte_vfio_is_enabled("vfio"))
- async_dma_map(dev, reg, false);
-
munmap(reg->mmap_addr, reg->mmap_size);
close(reg->fd);
}
* the device hasn't been initialised.
*/
static int
-vhost_user_set_owner(struct virtio_net **pdev,
- struct vhu_msg_context *ctx,
+vhost_user_set_owner(struct virtio_net **pdev __rte_unused,
+ struct vhu_msg_context *ctx __rte_unused,
int main_fd __rte_unused)
{
- struct virtio_net *dev = *pdev;
-
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
return RTE_VHOST_MSG_RESULT_OK;
}
static int
vhost_user_reset_owner(struct virtio_net **pdev,
- struct vhu_msg_context *ctx,
+ struct vhu_msg_context *ctx __rte_unused,
int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
vhost_destroy_device_notify(dev);
cleanup_device(dev, 0);
struct virtio_net *dev = *pdev;
uint64_t features = 0;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
rte_vhost_driver_get_features(dev->ifname, &features);
ctx->msg.payload.u64 = features;
struct virtio_net *dev = *pdev;
uint32_t queue_num = 0;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
ctx->msg.payload.u64 = (uint64_t)queue_num;
uint64_t vhost_features = 0;
struct rte_vdpa_device *vdpa_dev;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
rte_vhost_driver_get_features(dev->ifname, &vhost_features);
if (features & ~vhost_features) {
VHOST_LOG_CONFIG(ERR, "(%s) received invalid negotiated features.\n",
struct virtio_net *dev = *pdev;
struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
if (ctx->msg.payload.state.num > 32768) {
VHOST_LOG_CONFIG(ERR, "(%s) invalid virtqueue size %u\n",
dev->ifname, ctx->msg.payload.state.num);
struct vhost_vring_addr *addr = &ctx->msg.payload.addr;
bool access_ok;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
if (dev->mem == NULL)
return RTE_VHOST_MSG_RESULT_ERR;
struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
uint64_t val = ctx->msg.payload.state.num;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
if (vq_is_packed(dev)) {
/*
* Bit[0:14]: avail index
static int
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
- uint64_t host_iova, uint64_t size)
+ uint64_t host_iova, uint64_t host_user_addr, uint64_t size)
{
struct guest_page *page, *last_page;
struct guest_page *old_pages;
dev->max_guest_pages * sizeof(*page),
RTE_CACHE_LINE_SIZE);
if (dev->guest_pages == NULL) {
- VHOST_LOG_CONFIG(ERR, "(%s) cannot realloc guest_pages\n", dev->ifname);
+ VHOST_LOG_CONFIG(ERR, "cannot realloc guest_pages\n");
rte_free(old_pages);
return -1;
}
if (dev->nr_guest_pages > 0) {
last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
/* merge if the two pages are continuous */
- if (host_iova == last_page->host_iova +
- last_page->size) {
+ if (host_iova == last_page->host_iova + last_page->size &&
+ guest_phys_addr == last_page->guest_phys_addr + last_page->size &&
+ host_user_addr == last_page->host_user_addr + last_page->size) {
last_page->size += size;
return 0;
}
page = &dev->guest_pages[dev->nr_guest_pages++];
page->guest_phys_addr = guest_phys_addr;
page->host_iova = host_iova;
+ page->host_user_addr = host_user_addr;
page->size = size;
return 0;
size = page_size - (guest_phys_addr & (page_size - 1));
size = RTE_MIN(size, reg_size);
- if (add_one_guest_page(dev, guest_phys_addr, host_iova, size) < 0)
+ if (add_one_guest_page(dev, guest_phys_addr, host_iova,
+ host_user_addr, size) < 0)
return -1;
host_user_addr += size;
host_iova = rte_mem_virt2iova((void *)(uintptr_t)
host_user_addr);
if (add_one_guest_page(dev, guest_phys_addr, host_iova,
- size) < 0)
+ host_user_addr, size) < 0)
return -1;
host_user_addr += size;
uint64_t mmap_size;
uint64_t alignment;
int populate;
- int ret;
/* Check for memory_size + mmap_offset overflow */
if (mmap_offset >= -region->size) {
dev->ifname);
return -1;
}
-
- if (rte_vfio_is_enabled("vfio")) {
- ret = async_dma_map(dev, region, true);
- if (ret) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) configure IOMMU for DMA engine failed\n",
- dev->ifname);
- return -1;
- }
- }
}
VHOST_LOG_CONFIG(INFO, "(%s) guest memory region size: 0x%" PRIx64 "\n",
dev->mem->nregions++;
}
+ if (dev->async_copy && rte_vfio_is_enabled("vfio"))
+ async_dma_map(dev, true);
+
if (vhost_user_postcopy_register(dev, main_fd, ctx) < 0)
goto free_mem_table;
int fd, i;
int numa_node = SOCKET_ID_ANY;
+ if (validate_msg_fds(dev, ctx, 1) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
fd = ctx->fds[0];
if (ctx->msg.size != sizeof(ctx->msg.payload.inflight) || fd < 0) {
VHOST_LOG_CONFIG(ERR, "(%s) invalid set_inflight_fd message size is %d,fd is %d\n",
struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
uint64_t val;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
/* We have to stop the queue (virtio) if it is running. */
vhost_destroy_device_notify(dev);
bool enable = !!ctx->msg.payload.state.num;
int index = (int)ctx->msg.payload.state.index;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
VHOST_LOG_CONFIG(INFO, "(%s) set queue enable: %d to qp idx: %d\n",
dev->ifname, enable, index);
struct virtio_net *dev = *pdev;
uint64_t features, protocol_features;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
rte_vhost_driver_get_features(dev->ifname, &features);
rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
uint64_t protocol_features = ctx->msg.payload.u64;
uint64_t slave_protocol_features = 0;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
rte_vhost_driver_get_protocol_features(dev->ifname,
&slave_protocol_features);
if (protocol_features & ~slave_protocol_features) {
uint8_t *mac = (uint8_t *)&ctx->msg.payload.u64;
struct rte_vdpa_device *vdpa_dev;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
VHOST_LOG_CONFIG(DEBUG, "(%s) MAC: " RTE_ETHER_ADDR_PRT_FMT "\n",
dev->ifname, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
memcpy(dev->mac.addr_bytes, mac, 6);
{
struct virtio_net *dev = *pdev;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
if (ctx->msg.payload.u64 < VIRTIO_MIN_MTU ||
ctx->msg.payload.u64 > VIRTIO_MAX_MTU) {
VHOST_LOG_CONFIG(ERR, "(%s) invalid MTU size (%"PRIu64")\n",
return is_vring_iotlb_split(vq, imsg);
}
+static int
+vhost_user_get_config(struct virtio_net **pdev,
+ struct vhu_msg_context *ctx,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+ struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
+ int ret = 0;
+
+ /* NOTE(review): the dispatcher already runs validate_msg_fds() for
+ * handler-table entries with accepts_fd == false, so this second
+ * check looks redundant (harmless) — consider dropping it for
+ * consistency with the other fd-less handlers. */
+ if (validate_msg_fds(dev, ctx, 0) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ if (!vdpa_dev) {
+ VHOST_LOG_CONFIG(ERR, "(%s) is not vDPA device!\n",
+ dev->ifname);
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ if (vdpa_dev->ops->get_config) {
+ ret = vdpa_dev->ops->get_config(dev->vid,
+ ctx->msg.payload.cfg.region,
+ ctx->msg.payload.cfg.size);
+ if (ret != 0) {
+ ctx->msg.size = 0;
+ VHOST_LOG_CONFIG(ERR,
+ "(%s) get_config() return error!\n",
+ dev->ifname);
+ }
+ } else {
+ VHOST_LOG_CONFIG(ERR, "(%s) get_config() not supported!\n",
+ dev->ifname);
+ }
+
+ return RTE_VHOST_MSG_RESULT_REPLY;
+}
+
+static int
+vhost_user_set_config(struct virtio_net **pdev,
+ struct vhu_msg_context *ctx,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+ struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
+ int ret = 0;
+
+ /* NOTE(review): the dispatcher already runs validate_msg_fds() for
+ * handler-table entries with accepts_fd == false, so this second
+ * check looks redundant (harmless) — consider dropping it for
+ * consistency with the other fd-less handlers. */
+ if (validate_msg_fds(dev, ctx, 0) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ if (ctx->msg.payload.cfg.size > VHOST_USER_MAX_CONFIG_SIZE) {
+ VHOST_LOG_CONFIG(ERR,
+ "(%s) vhost_user_config size: %"PRIu32", should not be larger than %d\n",
+ dev->ifname, ctx->msg.payload.cfg.size,
+ VHOST_USER_MAX_CONFIG_SIZE);
+ goto out;
+ }
+
+ if (!vdpa_dev) {
+ VHOST_LOG_CONFIG(ERR, "(%s) is not vDPA device!\n",
+ dev->ifname);
+ goto out;
+ }
+
+ if (vdpa_dev->ops->set_config) {
+ ret = vdpa_dev->ops->set_config(dev->vid,
+ ctx->msg.payload.cfg.region,
+ ctx->msg.payload.cfg.offset,
+ ctx->msg.payload.cfg.size,
+ ctx->msg.payload.cfg.flags);
+ if (ret)
+ VHOST_LOG_CONFIG(ERR,
+ "(%s) set_config() return error!\n",
+ dev->ifname);
+ } else {
+ VHOST_LOG_CONFIG(ERR, "(%s) set_config() not supported!\n",
+ dev->ifname);
+ }
+
+ return RTE_VHOST_MSG_RESULT_OK;
+
+out:
+ return RTE_VHOST_MSG_RESULT_ERR;
+}
+
static int
vhost_user_iotlb_msg(struct virtio_net **pdev,
struct vhu_msg_context *ctx,
uint16_t i;
uint64_t vva, len;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
switch (imsg->type) {
case VHOST_IOTLB_UPDATE:
len = imsg->size;
vhost_user_iotlb_cache_insert(dev, vq, imsg->iova, vva,
len, imsg->perm);
- if (is_vring_iotlb(dev, vq, imsg))
+ if (is_vring_iotlb(dev, vq, imsg)) {
+ rte_spinlock_lock(&vq->access_lock);
*pdev = dev = translate_ring_addresses(dev, i);
+ rte_spinlock_unlock(&vq->access_lock);
+ }
}
break;
case VHOST_IOTLB_INVALIDATE:
vhost_user_iotlb_cache_remove(vq, imsg->iova,
imsg->size);
- if (is_vring_iotlb(dev, vq, imsg))
+ if (is_vring_iotlb(dev, vq, imsg)) {
+ rte_spinlock_lock(&vq->access_lock);
vring_invalidate(dev, vq);
+ rte_spinlock_unlock(&vq->access_lock);
+ }
}
break;
default:
#ifdef RTE_LIBRTE_VHOST_POSTCOPY
struct uffdio_api api_struct;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
if (dev->postcopy_ufd == -1) {
{
struct virtio_net *dev = *pdev;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
if (dev->mem && dev->mem->nregions) {
VHOST_LOG_CONFIG(ERR, "(%s) regions already registered at postcopy-listen\n",
dev->ifname);
{
struct virtio_net *dev = *pdev;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
dev->postcopy_listening = 0;
if (dev->postcopy_ufd >= 0) {
close(dev->postcopy_ufd);
{
struct virtio_net *dev = *pdev;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
ctx->msg.payload.u64 = dev->status;
ctx->msg.size = sizeof(ctx->msg.payload.u64);
ctx->fd_num = 0;
{
struct virtio_net *dev = *pdev;
- if (validate_msg_fds(dev, ctx, 0) != 0)
- return RTE_VHOST_MSG_RESULT_ERR;
-
/* As per Virtio specification, the device status is 8bits long */
if (ctx->msg.payload.u64 > UINT8_MAX) {
VHOST_LOG_CONFIG(ERR, "(%s) invalid VHOST_USER_SET_STATUS payload 0x%" PRIx64 "\n",
return RTE_VHOST_MSG_RESULT_OK;
}
-typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
- struct vhu_msg_context *ctx,
- int main_fd);
-
-static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = {
- [VHOST_USER_NONE] = NULL,
- [VHOST_USER_GET_FEATURES] = vhost_user_get_features,
- [VHOST_USER_SET_FEATURES] = vhost_user_set_features,
- [VHOST_USER_SET_OWNER] = vhost_user_set_owner,
- [VHOST_USER_RESET_OWNER] = vhost_user_reset_owner,
- [VHOST_USER_SET_MEM_TABLE] = vhost_user_set_mem_table,
- [VHOST_USER_SET_LOG_BASE] = vhost_user_set_log_base,
- [VHOST_USER_SET_LOG_FD] = vhost_user_set_log_fd,
- [VHOST_USER_SET_VRING_NUM] = vhost_user_set_vring_num,
- [VHOST_USER_SET_VRING_ADDR] = vhost_user_set_vring_addr,
- [VHOST_USER_SET_VRING_BASE] = vhost_user_set_vring_base,
- [VHOST_USER_GET_VRING_BASE] = vhost_user_get_vring_base,
- [VHOST_USER_SET_VRING_KICK] = vhost_user_set_vring_kick,
- [VHOST_USER_SET_VRING_CALL] = vhost_user_set_vring_call,
- [VHOST_USER_SET_VRING_ERR] = vhost_user_set_vring_err,
- [VHOST_USER_GET_PROTOCOL_FEATURES] = vhost_user_get_protocol_features,
- [VHOST_USER_SET_PROTOCOL_FEATURES] = vhost_user_set_protocol_features,
- [VHOST_USER_GET_QUEUE_NUM] = vhost_user_get_queue_num,
- [VHOST_USER_SET_VRING_ENABLE] = vhost_user_set_vring_enable,
- [VHOST_USER_SEND_RARP] = vhost_user_send_rarp,
- [VHOST_USER_NET_SET_MTU] = vhost_user_net_set_mtu,
- [VHOST_USER_SET_SLAVE_REQ_FD] = vhost_user_set_req_fd,
- [VHOST_USER_IOTLB_MSG] = vhost_user_iotlb_msg,
- [VHOST_USER_POSTCOPY_ADVISE] = vhost_user_set_postcopy_advise,
- [VHOST_USER_POSTCOPY_LISTEN] = vhost_user_set_postcopy_listen,
- [VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end,
- [VHOST_USER_GET_INFLIGHT_FD] = vhost_user_get_inflight_fd,
- [VHOST_USER_SET_INFLIGHT_FD] = vhost_user_set_inflight_fd,
- [VHOST_USER_SET_STATUS] = vhost_user_set_status,
- [VHOST_USER_GET_STATUS] = vhost_user_get_status,
+#define VHOST_MESSAGE_HANDLERS \
+VHOST_MESSAGE_HANDLER(VHOST_USER_NONE, NULL, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_FEATURES, vhost_user_get_features, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ADDR, vhost_user_set_vring_addr, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_BASE, vhost_user_set_vring_base, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_VRING_BASE, vhost_user_get_vring_base, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_KICK, vhost_user_set_vring_kick, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_CALL, vhost_user_set_vring_call, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ERR, vhost_user_set_vring_err, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_PROTOCOL_FEATURES, vhost_user_get_protocol_features, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_PROTOCOL_FEATURES, vhost_user_set_protocol_features, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_SLAVE_REQ_FD, vhost_user_set_req_fd, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_ADVISE, vhost_user_set_postcopy_advise, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_LISTEN, vhost_user_set_postcopy_listen, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_END, vhost_user_postcopy_end, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_INFLIGHT_FD, vhost_user_get_inflight_fd, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_INFLIGHT_FD, vhost_user_set_inflight_fd, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_STATUS, vhost_user_set_status, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_STATUS, vhost_user_get_status, false)
+
+#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd) \
+ [id] = { #id, handler, accepts_fd },
+static vhost_message_handler_t vhost_message_handlers[] = {
+ VHOST_MESSAGE_HANDLERS
};
+#undef VHOST_MESSAGE_HANDLER
/* return bytes# of read on success or negative val on failure. */
static int
case VHOST_USER_SET_VRING_ADDR:
vring_idx = ctx->msg.payload.addr.index;
break;
+ case VHOST_USER_SET_INFLIGHT_FD:
+ vring_idx = ctx->msg.payload.inflight.num_queues - 1;
+ break;
default:
return 0;
}
{
struct virtio_net *dev;
struct vhu_msg_context ctx;
+ vhost_message_handler_t *msg_handler;
struct rte_vdpa_device *vdpa_dev;
int ret;
int unlock_required = 0;
bool handled;
- int request;
+ uint32_t request;
uint32_t i;
dev = get_device(vid);
return -1;
}
- ret = 0;
request = ctx.msg.request.master;
- if (request > VHOST_USER_NONE && request < VHOST_USER_MAX &&
- vhost_message_str[request]) {
+ if (request > VHOST_USER_NONE && request < RTE_DIM(vhost_message_handlers))
+ msg_handler = &vhost_message_handlers[request];
+ else
+ msg_handler = NULL;
+
+ if (msg_handler != NULL && msg_handler->description != NULL) {
if (request != VHOST_USER_IOTLB_MSG)
VHOST_LOG_CONFIG(INFO, "(%s) read message %s\n",
- dev->ifname, vhost_message_str[request]);
+ dev->ifname, msg_handler->description);
else
VHOST_LOG_CONFIG(DEBUG, "(%s) read message %s\n",
- dev->ifname, vhost_message_str[request]);
+ dev->ifname, msg_handler->description);
} else {
VHOST_LOG_CONFIG(DEBUG, "(%s) external request %d\n", dev->ifname, request);
}
handled = false;
if (dev->extern_ops.pre_msg_handle) {
- ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
- (void *)&ctx.msg);
+ RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
+ ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
switch (ret) {
case RTE_VHOST_MSG_RESULT_REPLY:
send_vhost_reply(dev, fd, &ctx);
}
}
- if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) {
- if (!vhost_message_handlers[request])
- goto skip_to_post_handle;
- ret = vhost_message_handlers[request](&dev, &ctx, fd);
+ /* NOTE(review): "ret = 0" was removed earlier in this function; on the
+ * external-request path (no table handler, no pre_msg_handle) ret keeps
+ * its prior value when we jump below — confirm it cannot collide with
+ * the RTE_VHOST_MSG_RESULT_* codes checked after skip_to_post_handle. */
+ if (msg_handler == NULL || msg_handler->callback == NULL)
+ goto skip_to_post_handle;
- switch (ret) {
- case RTE_VHOST_MSG_RESULT_ERR:
- VHOST_LOG_CONFIG(ERR, "(%s) processing %s failed.\n",
- dev->ifname, vhost_message_str[request]);
- handled = true;
- break;
- case RTE_VHOST_MSG_RESULT_OK:
- VHOST_LOG_CONFIG(DEBUG, "(%s) processing %s succeeded.\n",
- dev->ifname, vhost_message_str[request]);
- handled = true;
- break;
- case RTE_VHOST_MSG_RESULT_REPLY:
- VHOST_LOG_CONFIG(DEBUG, "(%s) processing %s succeeded and needs reply.\n",
- dev->ifname, vhost_message_str[request]);
- send_vhost_reply(dev, fd, &ctx);
- handled = true;
- break;
- default:
- break;
- }
+ if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0) {
+ ret = RTE_VHOST_MSG_RESULT_ERR;
+ } else {
+ ret = msg_handler->callback(&dev, &ctx, fd);
+ }
+
+ switch (ret) {
+ case RTE_VHOST_MSG_RESULT_ERR:
+ VHOST_LOG_CONFIG(ERR, "(%s) processing %s failed.\n",
+ dev->ifname, msg_handler->description);
+ handled = true;
+ break;
+ case RTE_VHOST_MSG_RESULT_OK:
+ VHOST_LOG_CONFIG(DEBUG, "(%s) processing %s succeeded.\n",
+ dev->ifname, msg_handler->description);
+ handled = true;
+ break;
+ case RTE_VHOST_MSG_RESULT_REPLY:
+ VHOST_LOG_CONFIG(DEBUG, "(%s) processing %s succeeded and needs reply.\n",
+ dev->ifname, msg_handler->description);
+ send_vhost_reply(dev, fd, &ctx);
+ handled = true;
+ break;
+ default:
+ break;
}
skip_to_post_handle:
if (ret != RTE_VHOST_MSG_RESULT_ERR &&
dev->extern_ops.post_msg_handle) {
- ret = (*dev->extern_ops.post_msg_handle)(dev->vid,
- (void *)&ctx.msg);
+ RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
+ ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
switch (ret) {
case RTE_VHOST_MSG_RESULT_REPLY:
send_vhost_reply(dev, fd, &ctx);
send_vhost_reply(dev, fd, &ctx);
} else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
VHOST_LOG_CONFIG(ERR, "(%s) vhost message handling failed.\n", dev->ifname);
- return -1;
+ ret = -1;
+ goto unlock;
}
+ ret = 0;
for (i = 0; i < dev->nr_vring; i++) {
struct vhost_virtqueue *vq = dev->virtqueue[i];
bool cur_ready = vq_is_ready(dev, vq);
}
}
+unlock:
if (unlock_required)
vhost_user_unlock_all_queue_pairs(dev);
- if (!virtio_is_ready(dev))
+ if (ret != 0 || !virtio_is_ready(dev))
goto out;
/*
}
out:
- return 0;
+ return ret;
}
static int process_slave_message_reply(struct virtio_net *dev,