+ if (vq_is_packed(dev))
+ pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
+ else
+ pervq_inflight_size = get_pervq_shm_size_split(queue_size);
+
+ VHOST_LOG_CONFIG(INFO,
+ "set_inflight_fd mmap_size: %"PRIu64"\n", mmap_size);
+ VHOST_LOG_CONFIG(INFO,
+ "set_inflight_fd mmap_offset: %"PRIu64"\n", mmap_offset);
+ VHOST_LOG_CONFIG(INFO,
+ "set_inflight_fd num_queues: %u\n", num_queues);
+ VHOST_LOG_CONFIG(INFO,
+ "set_inflight_fd queue_size: %u\n", queue_size);
+ VHOST_LOG_CONFIG(INFO,
+ "set_inflight_fd fd: %d\n", fd);
+ VHOST_LOG_CONFIG(INFO,
+ "set_inflight_fd pervq_inflight_size: %d\n",
+ pervq_inflight_size);
+
+ if (!dev->inflight_info) {
+ dev->inflight_info = calloc(1,
+ sizeof(struct inflight_mem_info));
+ if (dev->inflight_info == NULL) {
+ VHOST_LOG_CONFIG(ERR,
+ "failed to alloc dev inflight area\n");
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+ dev->inflight_info->fd = -1;
+ }
+
+ if (dev->inflight_info->addr) {
+ munmap(dev->inflight_info->addr, dev->inflight_info->size);
+ dev->inflight_info->addr = NULL;
+ }
+
+ addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd, mmap_offset);
+ if (addr == MAP_FAILED) {
+ VHOST_LOG_CONFIG(ERR, "failed to mmap share memory.\n");
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ if (dev->inflight_info->fd >= 0) {
+ close(dev->inflight_info->fd);
+ dev->inflight_info->fd = -1;
+ }
+
+ dev->inflight_info->fd = fd;
+ dev->inflight_info->addr = addr;
+ dev->inflight_info->size = mmap_size;
+
+ for (i = 0; i < num_queues; i++) {
+ vq = dev->virtqueue[i];
+ if (vq_is_packed(dev)) {
+ vq->inflight_packed = addr;
+ vq->inflight_packed->desc_num = queue_size;
+ } else {
+ vq->inflight_split = addr;
+ vq->inflight_split->desc_num = queue_size;
+ }
+ addr = (void *)((char *)addr + pervq_inflight_size);
+ }
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
+/*
+ * VHOST_USER_SET_VRING_CALL handler: record the call eventfd used to
+ * interrupt the frontend for the vring selected by the message payload.
+ *
+ * @pdev: double pointer to the device (may be reassigned by other handlers,
+ *        unused for reassignment here)
+ * @msg:  message whose payload.u64 packs the vring index (low bits) and
+ *        the NOFD flag; msg->fds[0] carries the eventfd when present
+ * Return: RTE_VHOST_MSG_RESULT_OK on success, RTE_VHOST_MSG_RESULT_ERR
+ *         if the number of ancillary fds does not match the NOFD flag.
+ */
+static int
+vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+ struct vhost_vring_file file;
+ struct vhost_virtqueue *vq;
+ int expected_fds;
+
+ /* When NOFD is set the frontend attached no eventfd to the message. */
+ expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
+ if (validate_msg_fds(msg, expected_fds) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+ if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
+ file.fd = VIRTIO_INVALID_EVENTFD;
+ else
+ file.fd = msg->fds[0];
+ VHOST_LOG_CONFIG(INFO,
+ "vring call idx:%d file:%d\n", file.index, file.fd);
+
+ /* NOTE(review): file.index is masked but not range-checked against the
+ * number of allocated vrings here — presumably guaranteed by earlier
+ * per-message vring allocation/validation; confirm in the caller path.
+ */
+ vq = dev->virtqueue[file.index];
+ /* Close any previously installed call fd before replacing it, so the
+ * old descriptor is not leaked across reconfiguration.
+ */
+ if (vq->callfd >= 0)
+ close(vq->callfd);
+
+ vq->callfd = file.fd;
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
+/*
+ * VHOST_USER_SET_VRING_ERR handler: this backend does not use the error
+ * eventfd, so the fd (when one is attached) is simply closed to avoid a
+ * file-descriptor leak, and the request is acknowledged as handled.
+ *
+ * Return: RTE_VHOST_MSG_RESULT_OK, or RTE_VHOST_MSG_RESULT_ERR when the
+ *         number of ancillary fds does not match the NOFD flag.
+ */
+static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ int expected_fds;
+
+ /* NOFD set means the message carries no eventfd. */
+ expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
+ if (validate_msg_fds(msg, expected_fds) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ /* Take ownership of the fd only to close it: feature not implemented. */
+ if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK))
+ close(msg->fds[0]);
+ VHOST_LOG_CONFIG(INFO, "not implemented\n");
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
+/*
+ * qsort() comparator for struct rte_vhost_resubmit_desc: orders entries
+ * by submission counter, descending, so the most recently submitted
+ * in-flight descriptor ends up first in the resubmit list (its counter
+ * seeds vq->global_counter in the callers).
+ *
+ * Fix: the previous version never returned 0, so for equal counters
+ * compare(a, b) and compare(b, a) both returned -1 — an inconsistent
+ * comparison function, which is undefined behavior for qsort()
+ * (C11 7.22.5.2). Return 0 on equality instead.
+ */
+static int
+resubmit_desc_compare(const void *a, const void *b)
+{
+ const struct rte_vhost_resubmit_desc *desc0 = a;
+ const struct rte_vhost_resubmit_desc *desc1 = b;
+
+ if (desc1->counter > desc0->counter)
+ return 1;
+ if (desc1->counter < desc0->counter)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Rebuild the set of descriptors that were still in flight in a split
+ * virtqueue when the backend was interrupted (e.g. crash/reconnect), so
+ * they can be resubmitted before new requests are processed. The state
+ * lives in the shared inflight region negotiated via
+ * VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD.
+ *
+ * Return: RTE_VHOST_MSG_RESULT_OK when there is nothing to recover or the
+ *         resubmit list was built; RTE_VHOST_MSG_RESULT_ERR on allocation
+ *         failure.
+ */
+static int
+vhost_check_queue_inflights_split(struct virtio_net *dev,
+ struct vhost_virtqueue *vq)
+{
+ uint16_t i;
+ uint16_t resubmit_num = 0, last_io, num;
+ struct vring_used *used = vq->used;
+ struct rte_vhost_resubmit_info *resubmit;
+ struct rte_vhost_inflight_info_split *inflight_split;
+
+ /* No inflight tracking unless the protocol feature was negotiated. */
+ if (!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
+ return RTE_VHOST_MSG_RESULT_OK;
+
+ /* The frontend may still not support the inflight feature
+ * although we negotiate the protocol feature.
+ */
+ if ((!vq->inflight_split))
+ return RTE_VHOST_MSG_RESULT_OK;
+
+ /* version == 0 marks a freshly created region: stamp it and return,
+ * there is nothing to recover.
+ */
+ if (!vq->inflight_split->version) {
+ vq->inflight_split->version = INFLIGHT_VERSION;
+ return RTE_VHOST_MSG_RESULT_OK;
+ }
+
+ /* Recovery state already prepared for this queue. */
+ if (vq->resubmit_inflight)
+ return RTE_VHOST_MSG_RESULT_OK;
+
+ inflight_split = vq->inflight_split;
+ vq->global_counter = 0;
+ last_io = inflight_split->last_inflight_io;
+
+ /* If the ring's used index advanced past the shadow copy in the
+ * inflight region, the last recorded I/O actually completed before
+ * the crash: clear its inflight flag, then resync the shadow index.
+ * NOTE(review): last_io is read from shared memory and used to index
+ * desc[] without a bound check against desc_num — confirm the
+ * frontend cannot drive it out of range.
+ */
+ if (inflight_split->used_idx != used->idx) {
+ inflight_split->desc[last_io].inflight = 0;
+ rte_smp_mb();
+ inflight_split->used_idx = used->idx;
+ }
+
+ /* Count descriptors still marked in flight. */
+ for (i = 0; i < inflight_split->desc_num; i++) {
+ if (inflight_split->desc[i].inflight == 1)
+ resubmit_num++;
+ }
+
+ /* Account for the slots that will be replayed. */
+ vq->last_avail_idx += resubmit_num;
+
+ if (resubmit_num) {
+ resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
+ if (!resubmit) {
+ VHOST_LOG_CONFIG(ERR,
+ "failed to allocate memory for resubmit info.\n");
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ resubmit->resubmit_list = calloc(resubmit_num,
+ sizeof(struct rte_vhost_resubmit_desc));
+ if (!resubmit->resubmit_list) {
+ VHOST_LOG_CONFIG(ERR,
+ "failed to allocate memory for inflight desc.\n");
+ free(resubmit);
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ /* Collect index + submission counter of every inflight desc. */
+ num = 0;
+ for (i = 0; i < vq->inflight_split->desc_num; i++) {
+ if (vq->inflight_split->desc[i].inflight == 1) {
+ resubmit->resubmit_list[num].index = i;
+ resubmit->resubmit_list[num].counter =
+ inflight_split->desc[i].counter;
+ num++;
+ }
+ }
+ resubmit->resubmit_num = num;
+
+ /* Sort by counter, descending (see resubmit_desc_compare), so
+ * entry 0 holds the newest submission.
+ */
+ if (resubmit->resubmit_num > 1)
+ qsort(resubmit->resubmit_list, resubmit->resubmit_num,
+ sizeof(struct rte_vhost_resubmit_desc),
+ resubmit_desc_compare);
+
+ /* Continue the counter sequence after the newest inflight I/O. */
+ vq->global_counter = resubmit->resubmit_list[0].counter + 1;
+ vq->resubmit_inflight = resubmit;
+ }
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
+/*
+ * Packed-ring counterpart of vhost_check_queue_inflights_split(): rebuild
+ * the set of descriptors that were in flight when the backend was
+ * interrupted, using the shared inflight region, so they can be
+ * resubmitted before new requests are processed.
+ *
+ * Return: RTE_VHOST_MSG_RESULT_OK when there is nothing to recover or the
+ *         resubmit list was built; RTE_VHOST_MSG_RESULT_ERR on allocation
+ *         failure.
+ */
+static int
+vhost_check_queue_inflights_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq)
+{
+ uint16_t i;
+ uint16_t resubmit_num = 0, old_used_idx, num;
+ struct rte_vhost_resubmit_info *resubmit;
+ struct rte_vhost_inflight_info_packed *inflight_packed;
+
+ /* No inflight tracking unless the protocol feature was negotiated. */
+ if (!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
+ return RTE_VHOST_MSG_RESULT_OK;
+
+ /* The frontend may still not support the inflight feature
+ * although we negotiate the protocol feature.
+ */
+ if ((!vq->inflight_packed))
+ return RTE_VHOST_MSG_RESULT_OK;
+
+ /* version == 0 marks a freshly created region: stamp it and return,
+ * there is nothing to recover.
+ */
+ if (!vq->inflight_packed->version) {
+ vq->inflight_packed->version = INFLIGHT_VERSION;
+ return RTE_VHOST_MSG_RESULT_OK;
+ }
+
+ /* Recovery state already prepared for this queue. */
+ if (vq->resubmit_inflight)
+ return RTE_VHOST_MSG_RESULT_OK;
+
+ inflight_packed = vq->inflight_packed;
+ vq->global_counter = 0;
+ old_used_idx = inflight_packed->old_used_idx;
+
+ /* used_idx != old_used_idx means a used-ring update was in progress
+ * when the backend stopped. If the descriptor at the old index is no
+ * longer inflight, the update completed: commit it by advancing the
+ * old_* snapshot. Otherwise the update was torn: roll the ring state
+ * back to the old_* snapshot so the I/O will be replayed.
+ */
+ if (inflight_packed->used_idx != old_used_idx) {
+ if (inflight_packed->desc[old_used_idx].inflight == 0) {
+ inflight_packed->old_used_idx =
+ inflight_packed->used_idx;
+ inflight_packed->old_used_wrap_counter =
+ inflight_packed->used_wrap_counter;
+ inflight_packed->old_free_head =
+ inflight_packed->free_head;
+ } else {
+ inflight_packed->used_idx =
+ inflight_packed->old_used_idx;
+ inflight_packed->used_wrap_counter =
+ inflight_packed->old_used_wrap_counter;
+ inflight_packed->free_head =
+ inflight_packed->old_free_head;
+ }
+ }
+
+ /* Count descriptors still marked in flight. */
+ for (i = 0; i < inflight_packed->desc_num; i++) {
+ if (inflight_packed->desc[i].inflight == 1)
+ resubmit_num++;
+ }
+
+ if (resubmit_num) {
+ resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
+ if (resubmit == NULL) {
+ VHOST_LOG_CONFIG(ERR,
+ "failed to allocate memory for resubmit info.\n");
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ resubmit->resubmit_list = calloc(resubmit_num,
+ sizeof(struct rte_vhost_resubmit_desc));
+ if (resubmit->resubmit_list == NULL) {
+ VHOST_LOG_CONFIG(ERR,
+ "failed to allocate memory for resubmit desc.\n");
+ free(resubmit);
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ /* Collect index + submission counter of every inflight desc. */
+ num = 0;
+ for (i = 0; i < inflight_packed->desc_num; i++) {
+ if (vq->inflight_packed->desc[i].inflight == 1) {
+ resubmit->resubmit_list[num].index = i;
+ resubmit->resubmit_list[num].counter =
+ inflight_packed->desc[i].counter;
+ num++;
+ }
+ }
+ resubmit->resubmit_num = num;
+
+ /* Sort by counter, descending (see resubmit_desc_compare), so
+ * entry 0 holds the newest submission.
+ */
+ if (resubmit->resubmit_num > 1)
+ qsort(resubmit->resubmit_list, resubmit->resubmit_num,
+ sizeof(struct rte_vhost_resubmit_desc),
+ resubmit_desc_compare);
+
+ /* Continue the counter sequence after the newest inflight I/O. */
+ vq->global_counter = resubmit->resubmit_list[0].counter + 1;
+ vq->resubmit_inflight = resubmit;
+ }
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
+static int
+vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+ struct vhost_vring_file file;
+ struct vhost_virtqueue *vq;
+ int expected_fds;
+
+ expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
+ if (validate_msg_fds(msg, expected_fds) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+ if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
+ file.fd = VIRTIO_INVALID_EVENTFD;
+ else
+ file.fd = msg->fds[0];
+ VHOST_LOG_CONFIG(INFO,
+ "vring kick idx:%d file:%d\n", file.index, file.fd);
+
+ /* Interpret ring addresses only when ring is started. */
+ dev = translate_ring_addresses(dev, file.index);
+ if (!dev)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ *pdev = dev;
+
+ vq = dev->virtqueue[file.index];