/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>

#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

const char * const virtio_user_backend_strings[] = {
	[VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN",
	[VIRTIO_USER_BACKEND_VHOST_USER] = "VHOST_USER",
	[VIRTIO_USER_BACKEND_VHOST_KERNEL] = "VHOST_NET",
	[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
};

static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all per virtqueue MSGs, make sure VHOST_SET_VRING_CALL comes
	 * first, because vhost depends on this msg to allocate the
	 * virtqueue pair.
	 */
	struct vhost_vring_file file;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);

	return 0;
}

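/*
 * Program a single virtqueue into the backend: ring size
 * (SET_VRING_NUM), start index (SET_VRING_BASE), ring addresses
 * (SET_VRING_ADDR, using the split or packed layout as negotiated)
 * and, last of all, the kick eventfd (SET_VRING_KICK).
 */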
static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings[queue_sel];
	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);

	state.index = queue_sel;
	state.num = 0; /* no reservation */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		/* packed ring: bit 15 carries the avail wrap counter */
		state.num |= (1 << 15);
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);

	dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);

	/* Of all per virtqueue MSGs, make sure VHOST_USER_SET_VRING_KICK
	 * comes last, because vhost depends on this msg to judge if
	 * virtio is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);

	return 0;
}

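/*
 * Apply fn() to every virtqueue: all Rx queues first (even indices,
 * VTNET_SQ_RQ_QUEUE_IDX), then all Tx queues (odd indices,
 * VTNET_SQ_TQ_QUEUE_IDX).
 */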
static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, queue_sel;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup rx vq fails: %u", i);
			return -1;
		}
	}
	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup tx vq fails: %u", i);
			return -1;
		}
	}

	return 0;
}

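/*
 * Negotiate features with the backend: create the queues, then send
 * VHOST_USER_SET_FEATURES with the frontend-only bits (MAC, CTRL_VQ,
 * STATUS) masked out, since those are emulated in this driver and the
 * backend never needs to see them.
 */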
int
virtio_user_dev_set_features(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret = -1;

	pthread_mutex_lock(&dev->mutex);

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
			dev->vhostfd < 0)
		goto error;

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
	if (ret < 0)
		goto error;
	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
error:
	pthread_mutex_unlock(&dev->mutex);

	return ret;
}

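/*
 * Bring the device up: share the memory table with the backend, kick
 * every queue, and enable the first queue pair. Runs with memory
 * hotplug locked out (see the workaround note below) so the region
 * table cannot change while it is being shared.
 */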
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	int ret;

	/* XXX workaround: we need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
			dev->vhostfd < 0)
		goto error;

	/* Step 2: share memory regions */
	ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
		goto error;

	/* Step 4: enable queues
	 * we enable the 1st queue pair by default.
	 */
	dev->ops->enable_qp(dev, 0, 1);

	dev->started = true;
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();
	/* TODO: free resource here or caller to check */
	return -1;
}

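/*
 * Quiesce the device: disable all queue pairs, then fetch each ring's
 * base index with VHOST_USER_GET_VRING_BASE, which also tells the
 * vhost backend to stop processing that ring.
 */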
int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int ret = 0;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i)
		dev->ops->enable_qp(dev, i, 0);

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		if (dev->ops->send_request(dev, VHOST_USER_GET_VRING_BASE,
					   &state) < 0) {
			PMD_DRV_LOG(ERR, "get_vring_base failed, index=%u\n",
				    i);
			ret = -1;
			goto out;
		}
	}

	dev->started = false;
out:
	pthread_mutex_unlock(&dev->mutex);

	return ret;
}

static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr tmp;

	if (!mac)
		return;

	if (rte_ether_unformat_addr(mac, &tmp) == 0) {
		memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;
	} else {
		/* ignore the wrong mac, use random mac */
		PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
	}
}

static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j;
	int callfd;
	int kickfd;

	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
		if (i >= dev->max_queue_pairs * 2) {
			dev->kickfds[i] = -1;
			dev->callfds[i] = -1;
			continue;
		}

		/* May use invalid flag, but some backend uses kickfd and
		 * callfd as criteria to judge if dev is alive. so finally we
		 * use real event_fd.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
			break;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			close(callfd);
			PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
			break;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	/* Not every queue was set up: roll back the ones that were. */
	if (i < VIRTIO_MAX_VIRTQUEUES) {
		for (j = 0; j < i; ++j) {
			close(dev->callfds[j]);
			close(dev->kickfds[j]);
		}
		return -1;
	}

	return 0;
}

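/*
 * Publish the per-queue call eventfds through the ethdev interrupt
 * handle so the application can wait on Rx interrupts, and point the
 * handle's main fd at the backend connection for link-state events.
 */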
static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];

	if (!eth_dev->intr_handle) {
		eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
		if (!eth_dev->intr_handle) {
			PMD_DRV_LOG(ERR, "fail to allocate intr_handle");
			return -1;
		}
		memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
	}

	for (i = 0; i < dev->max_queue_pairs; ++i)
		eth_dev->intr_handle->efds[i] = dev->callfds[i];
	eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
	eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
	eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
	/* For virtio vdev, no need to read counter for clean */
	eth_dev->intr_handle->efd_counter_size = 0;
	eth_dev->intr_handle->fd = -1;
	if (dev->vhostfd >= 0)
		eth_dev->intr_handle->fd = dev->vhostfd;
	else if (dev->is_server)
		eth_dev->intr_handle->fd = dev->listenfd;

	return 0;
}

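/*
 * Memory hotplug callback: when the EAL maps or unmaps hugepage
 * memory, the backend's view of guest memory goes stale, so pause the
 * active queues, resend VHOST_USER_SET_MEM_TABLE, and resume.
 */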
static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (dev->started == false)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 0);

	/* Step 2: update memory regions */
	dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 1);

exit:
	pthread_mutex_unlock(&dev->mutex);
}

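/*
 * Select the backend ops table (vhost-user, vhost-kernel or
 * vhost-vdpa), allocate per-queue fd arrays where the backend needs
 * them, and initialize notification fds and the interrupt handle.
 */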
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	uint32_t q;

	dev->vhostfd = -1;
	dev->vhostfds = NULL;
	dev->tapfds = NULL;

	if (dev->is_server) {
		if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
			PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
			return -1;
		}
		dev->ops = &virtio_ops_user;
	} else {
		if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
			dev->ops = &virtio_ops_user;
		} else if (dev->backend_type ==
				VIRTIO_USER_BACKEND_VHOST_KERNEL) {
			dev->ops = &virtio_ops_kernel;

			dev->vhostfds = malloc(dev->max_queue_pairs *
					sizeof(int));
			dev->tapfds = malloc(dev->max_queue_pairs *
					sizeof(int));
			if (!dev->vhostfds || !dev->tapfds) {
				PMD_INIT_LOG(ERR, "Failed to malloc");
				return -1;
			}

			for (q = 0; q < dev->max_queue_pairs; ++q) {
				dev->vhostfds[q] = -1;
				dev->tapfds[q] = -1;
			}
		} else if (dev->backend_type ==
				VIRTIO_USER_BACKEND_VHOST_VDPA) {
			dev->ops = &virtio_ops_vdpa;
		} else {
			PMD_DRV_LOG(ERR, "Unknown backend type");
			return -1;
		}
	}

	if (dev->ops->setup(dev) < 0)
		return -1;

	if (virtio_user_dev_init_notify(dev) < 0)
		return -1;

	if (virtio_user_fill_intr_handle(dev) < 0)
		return -1;

	return 0;
}

/* Use below macro to filter features from vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES			\
	(1ULL << VIRTIO_NET_F_MAC |			\
	 1ULL << VIRTIO_NET_F_STATUS |			\
	 1ULL << VIRTIO_NET_F_MQ |			\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR |		\
	 1ULL << VIRTIO_NET_F_CTRL_VQ |			\
	 1ULL << VIRTIO_NET_F_CTRL_RX |			\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN |		\
	 1ULL << VIRTIO_NET_F_CSUM |			\
	 1ULL << VIRTIO_NET_F_HOST_TSO4 |		\
	 1ULL << VIRTIO_NET_F_HOST_TSO6 |		\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF |		\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC |		\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM |		\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4 |		\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6 |		\
	 1ULL << VIRTIO_F_IN_ORDER |			\
	 1ULL << VIRTIO_F_VERSION_1 |			\
	 1ULL << VIRTIO_F_RING_PACKED |			\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

#define VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES		\
	(1ULL << VHOST_USER_PROTOCOL_F_MQ |		\
	 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |	\
	 1ULL << VHOST_USER_PROTOCOL_F_STATUS)

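/*
 * Feature negotiation in a nutshell: the backend's feature set is
 * read (or assumed, in server mode), frontend-emulated bits are OR-ed
 * in, and everything outside VIRTIO_USER_SUPPORTED_FEATURES is masked
 * off, i.e. device_features =
 *     (frontend_features | backend_features) & ~unsupported_features.
 */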
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq,
		     enum virtio_user_backend_type backend_type)
{
	uint64_t protocol_features = 0;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, PATH_MAX);

	dev->max_queue_pairs = queues;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
	dev->protocol_features = VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES;
	dev->backend_type = backend_type;

	parse_mac(dev, mac);

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "backend set up fails");
		return -1;
	}

	if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER)
		dev->unsupported_features |=
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);

	if (!dev->is_server) {
		if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
					   NULL) < 0) {
			PMD_INIT_LOG(ERR, "set_owner fails: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
					   &dev->device_features) < 0) {
			PMD_INIT_LOG(ERR, "get_features failed: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->device_features &
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
			if (dev->ops->send_request(dev,
					VHOST_USER_GET_PROTOCOL_FEATURES,
					&protocol_features))
				return -1;

			dev->protocol_features &= protocol_features;

			if (dev->ops->send_request(dev,
					VHOST_USER_SET_PROTOCOL_FEATURES,
					&dev->protocol_features))
				return -1;

			if (!(dev->protocol_features &
					(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
				dev->unsupported_features |=
					(1ull << VIRTIO_NET_F_MQ);
		}
	} else {
		/* We just pretend vhost-user can support all these features.
		 * Note that this could be problematic if some feature is
		 * negotiated but not supported by the vhost-user backend
		 * which connects later.
		 */
		dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;

		/* We cannot assume VHOST_USER_PROTOCOL_F_STATUS is supported
		 * until it's negotiated
		 */
		dev->protocol_features &=
			~(1ULL << VHOST_USER_PROTOCOL_F_STATUS);
	}

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* device does not really need to know anything about CQ,
		 * so if necessary, we just claim to support CQ
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature, we add it explicitly */
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	/*
	 * Device features =
	 *     (frontend_features | backend_features) & ~unsupported_features;
	 */
	dev->device_features |= dev->frontend_features;
	dev->device_features &= ~dev->unsupported_features;

	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
					    virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR,
				     "Failed to register mem event callback");
			return -1;
		}
	}

	return 0;
}

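/*
 * Tear-down mirror of virtio_user_dev_init(): stop the device,
 * unregister the memory event callback and close every fd the init
 * path opened.
 */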
void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	uint32_t i;

	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		close(dev->callfds[i]);
		close(dev->kickfds[i]);
	}

	if (dev->vhostfd >= 0)
		close(dev->vhostfd);

	if (dev->is_server && dev->listenfd >= 0) {
		close(dev->listenfd);
		dev->listenfd = -1;
	}

	if (dev->vhostfds) {
		for (i = 0; i < dev->max_queue_pairs; ++i) {
			close(dev->vhostfds[i]);
			if (dev->tapfds[i] >= 0)
				close(dev->tapfds[i]);
		}
		free(dev->vhostfds);
		free(dev->tapfds);
	}

	free(dev->ifname);
}

uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
			     q_pairs, dev->max_queue_pairs);
		return -1;
	}

	/* Server mode can't enable queue pairs if vhostfd is invalid,
	 * always return 0 in this case.
	 */
	if (!dev->is_server || dev->vhostfd >= 0) {
		for (i = 0; i < q_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 1);
		for (i = q_pairs; i < dev->max_queue_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 0);
	}
	dev->queue_pairs = q_pairs;

	return ret;
}

static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}

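/*
 * A packed-ring descriptor is available to the "device" side (this
 * emulation) when its AVAIL flag matches the current wrap counter and
 * its USED flag does not; the load-acquire on flags orders the flag
 * check against subsequent reads of the descriptor body.
 */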
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
	       wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}

static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)
		vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}

void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {

		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
				vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;

		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}

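/*
 * Split-ring variant of the control-queue emulation. used->idx doubles
 * as the consumption cursor into the avail ring: entries between
 * used->idx and avail->idx are requests not yet processed.
 */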
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume avail ring, using used ring idx as first one */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}

int
virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;
	uint64_t arg = status;

	pthread_mutex_lock(&dev->mutex);
	dev->status = status;
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		ret = dev->ops->send_request(dev,
				VHOST_USER_SET_STATUS, &arg);
	else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
		ret = dev->ops->send_request(dev,
				VHOST_USER_SET_STATUS, &status);
	else
		ret = -ENOTSUP;

	if (ret && ret != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "VHOST_USER_SET_STATUS failed (%d): %s", ret,
			     strerror(errno));
	}

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

int
virtio_user_dev_update_status(struct virtio_user_dev *dev)
{
	uint64_t ret;
	uint8_t status;
	int err;

	pthread_mutex_lock(&dev->mutex);
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
		err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS, &ret);
		if (!err && ret > UINT8_MAX) {
			PMD_INIT_LOG(ERR, "Invalid VHOST_USER_GET_STATUS "
					"response 0x%" PRIx64 "\n", ret);
			err = -1;
			goto error;
		}

		status = ret;
	} else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA) {
		err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS,
				&status);
	} else {
		err = -ENOTSUP;
	}

	if (!err) {
		dev->status = status;
		PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
			"\t-RESET: %u\n"
			"\t-ACKNOWLEDGE: %u\n"
			"\t-DRIVER: %u\n"
			"\t-DRIVER_OK: %u\n"
			"\t-FEATURES_OK: %u\n"
			"\t-DEVICE_NEED_RESET: %u\n"
			"\t-FAILED: %u\n",
			dev->status,
			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
	} else if (err != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "VHOST_USER_GET_STATUS failed (%d): %s", err,
			     strerror(errno));
	}

error:
	pthread_mutex_unlock(&dev->mutex);
	return err;
}