/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <rte_alarm.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"
#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"
const char * const virtio_user_backend_strings[] = {
        [VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN",
        [VIRTIO_USER_BACKEND_VHOST_USER] = "VHOST_USER",
        [VIRTIO_USER_BACKEND_VHOST_KERNEL] = "VHOST_NET",
        [VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
};
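/*
 * The table above maps enum virtio_user_backend_type values to printable
 * names. Illustrative use only (assuming dev->backend_type is valid):
 *
 *        PMD_INIT_LOG(INFO, "(%s) backend: %s", dev->path,
 *                virtio_user_backend_strings[dev->backend_type]);
 */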
static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
        /* Of all per-virtqueue messages, VHOST_SET_VRING_CALL must come
         * first, because vhost depends on it to allocate the virtqueue pair.
         */
        int ret;
        struct vhost_vring_file file;

        file.index = queue_sel;
        file.fd = dev->callfds[queue_sel];
        ret = dev->ops->set_vring_call(dev, &file);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "(%s) Failed to create queue %u", dev->path, queue_sel);
                return -1;
        }

        return 0;
}
static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
        int ret;
        struct vhost_vring_file file;
        struct vhost_vring_state state;
        struct vring *vring = &dev->vrings[queue_sel];
        struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
        struct vhost_vring_addr addr = {
                .index = queue_sel,
                .log_guest_addr = 0,
                .flags = 0, /* disable log */
        };

        if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
                addr.desc_user_addr =
                        (uint64_t)(uintptr_t)pq_vring->desc;
                addr.avail_user_addr =
                        (uint64_t)(uintptr_t)pq_vring->driver;
                addr.used_user_addr =
                        (uint64_t)(uintptr_t)pq_vring->device;
        } else {
                addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
                addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
                addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
        }

        state.index = queue_sel;
        state.num = vring->num;
        ret = dev->ops->set_vring_num(dev, &state);
        if (ret < 0)
                goto err;

        state.index = queue_sel;
        state.num = 0; /* no reservation */
        if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
                state.num |= (1 << 15); /* encode the initial wrap counter */
        ret = dev->ops->set_vring_base(dev, &state);
        if (ret < 0)
                goto err;

        ret = dev->ops->set_vring_addr(dev, &addr);
        if (ret < 0)
                goto err;

        /* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
         * comes last, because vhost uses this message to judge whether
         * virtio is ready.
         */
        file.index = queue_sel;
        file.fd = dev->kickfds[queue_sel];
        ret = dev->ops->set_vring_kick(dev, &file);
        if (ret < 0)
                goto err;

        return 0;
err:
        PMD_INIT_LOG(ERR, "(%s) Failed to kick queue %u", dev->path, queue_sel);

        return -1;
}
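/*
 * Summary of the per-virtqueue vhost message sequence implemented by the
 * two helpers above (a sketch, not an exhaustive protocol description):
 *
 *        VHOST_SET_VRING_CALL - first, lets vhost allocate the virtqueue
 *        VHOST_SET_VRING_NUM  - ring size
 *        VHOST_SET_VRING_BASE - start index; bit 15 carries the packed
 *                               ring wrap counter
 *        VHOST_SET_VRING_ADDR - desc/avail/used (or driver/device) addresses
 *        VHOST_SET_VRING_KICK - last, vhost uses it to judge readiness
 */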
static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
                        int (*fn)(struct virtio_user_dev *, uint32_t))
{
        uint32_t i, queue_sel;

        for (i = 0; i < dev->max_queue_pairs; ++i) {
                queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
                if (fn(dev, queue_sel) < 0) {
                        PMD_DRV_LOG(ERR, "(%s) setup rx vq %u failed", dev->path, i);
                        return -1;
                }
        }
        for (i = 0; i < dev->max_queue_pairs; ++i) {
                queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
                if (fn(dev, queue_sel) < 0) {
                        PMD_DRV_LOG(ERR, "(%s) setup tx vq %u failed", dev->path, i);
                        return -1;
                }
        }

        return 0;
}
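/*
 * Virtqueue indexing follows the virtio-net layout: queue pair i maps to
 * Rx vq 2 * i + VTNET_SQ_RQ_QUEUE_IDX and Tx vq 2 * i + VTNET_SQ_TQ_QUEUE_IDX.
 * For example, with two queue pairs the callback runs for vqs 0 and 2 (Rx),
 * then vqs 1 and 3 (Tx).
 */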
int
virtio_user_dev_set_features(struct virtio_user_dev *dev)
{
        uint64_t features;
        int ret = -1;

        pthread_mutex_lock(&dev->mutex);

        /* Step 0: tell vhost to create queues */
        if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
                goto error;

        features = dev->features;

        /* Strip VIRTIO_NET_F_MAC, as the MAC address is handled in vdev init */
        features &= ~(1ull << VIRTIO_NET_F_MAC);
        /* Strip VIRTIO_NET_F_CTRL_VQ, as the device does not really need to know */
        features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
        features &= ~(1ull << VIRTIO_NET_F_STATUS);
        ret = dev->ops->set_features(dev, features);
        if (ret < 0)
                goto error;
        PMD_DRV_LOG(INFO, "(%s) set features: 0x%" PRIx64, dev->path, features);
error:
        pthread_mutex_unlock(&dev->mutex);

        return ret;
}
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
        int ret;

        /*
         * We need to make sure that the locks will be
         * taken in the correct order to avoid deadlocks.
         *
         * Before releasing this lock, this thread should
         * not trigger any memory hotplug events.
         *
         * This is a temporary workaround, and should be
         * replaced when we get proper support from the
         * memory subsystem in the future.
         */
        rte_mcfg_mem_read_lock();
        pthread_mutex_lock(&dev->mutex);

        /* Step 2: share memory regions */
        ret = dev->ops->set_memory_table(dev);
        if (ret < 0)
                goto error;

        /* Step 3: kick queues */
        ret = virtio_user_queue_setup(dev, virtio_user_kick_queue);
        if (ret < 0)
                goto error;

        /* Step 4: enable queues
         * we enable the 1st queue pair by default.
         */
        ret = dev->ops->enable_qp(dev, 0, 1);
        if (ret < 0)
                goto error;

        dev->started = true;

        pthread_mutex_unlock(&dev->mutex);
        rte_mcfg_mem_read_unlock();

        return 0;
error:
        pthread_mutex_unlock(&dev->mutex);
        rte_mcfg_mem_read_unlock();

        PMD_INIT_LOG(ERR, "(%s) Failed to start device", dev->path);

        /* TODO: free resource here or caller to check */
        return -1;
}
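/*
 * Bring-up order across virtio_user_dev_set_features() and
 * virtio_user_start_device(), matching the "Step" comments above
 * (Step 1, feature negotiation, happens in the set_features callback):
 *
 *        Step 0: create queues
 *        Step 2: share the memory table
 *        Step 3: kick the queues
 *        Step 4: enable the first queue pair
 */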
int virtio_user_stop_device(struct virtio_user_dev *dev)
{
        struct vhost_vring_state state;
        uint32_t i;
        int ret;

        pthread_mutex_lock(&dev->mutex);
        if (!dev->started)
                goto out;

        for (i = 0; i < dev->max_queue_pairs; ++i) {
                ret = dev->ops->enable_qp(dev, i, 0);
                if (ret < 0)
                        goto err;
        }

        /* Stop the backend. */
        for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
                state.index = i;
                ret = dev->ops->get_vring_base(dev, &state);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "(%s) get_vring_base failed, index=%u", dev->path, i);
                        goto err;
                }
        }

        dev->started = false;

out:
        pthread_mutex_unlock(&dev->mutex);

        return 0;
err:
        pthread_mutex_unlock(&dev->mutex);

        PMD_INIT_LOG(ERR, "(%s) Failed to stop device", dev->path);

        return -1;
}
static int
virtio_user_dev_set_mac(struct virtio_user_dev *dev)
{
        int ret = 0;

        if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MAC)))
                return -ENOTSUP;

        if (!dev->ops->set_config)
                return -ENOTSUP;

        ret = dev->ops->set_config(dev, dev->mac_addr,
                        offsetof(struct virtio_net_config, mac),
                        RTE_ETHER_ADDR_LEN);
        if (ret)
                PMD_DRV_LOG(ERR, "(%s) Failed to set MAC address in device", dev->path);

        return ret;
}
static int
virtio_user_dev_get_mac(struct virtio_user_dev *dev)
{
        int ret = 0;

        if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MAC)))
                return -ENOTSUP;

        if (!dev->ops->get_config)
                return -ENOTSUP;

        ret = dev->ops->get_config(dev, dev->mac_addr,
                        offsetof(struct virtio_net_config, mac),
                        RTE_ETHER_ADDR_LEN);
        if (ret)
                PMD_DRV_LOG(ERR, "(%s) Failed to get MAC address from device", dev->path);

        return ret;
}
static void
virtio_user_dev_init_mac(struct virtio_user_dev *dev, const char *mac)
{
        struct rte_ether_addr cmdline_mac;
        char buf[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        if (mac && rte_ether_unformat_addr(mac, &cmdline_mac) == 0) {
                /*
                 * The MAC address was passed from the command line; try to
                 * store it in the device if it supports it. Otherwise try
                 * to use the device-provided one.
                 */
                memcpy(dev->mac_addr, &cmdline_mac, RTE_ETHER_ADDR_LEN);
                dev->mac_specified = 1;

                /* Setting the MAC may fail; fall back to the device MAC in that case */
                virtio_user_dev_set_mac(dev);
                ret = virtio_user_dev_get_mac(dev);
                if (ret == -ENOTSUP)
                        goto out;

                if (memcmp(&cmdline_mac, dev->mac_addr, RTE_ETHER_ADDR_LEN))
                        PMD_DRV_LOG(INFO, "(%s) Device MAC update failed", dev->path);
        } else {
                ret = virtio_user_dev_get_mac(dev);
                if (ret) {
                        PMD_DRV_LOG(ERR, "(%s) No valid MAC in devargs or device, use random",
                                        dev->path);
                        return;
                }

                dev->mac_specified = 1;
        }
out:
        rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE,
                        (struct rte_ether_addr *)dev->mac_addr);
        PMD_DRV_LOG(INFO, "(%s) MAC %s specified", dev->path, buf);
}
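/*
 * The MAC typically arrives via devargs; an illustrative virtio-user
 * vdev argument string (socket path is an example value):
 *
 *        --vdev=net_virtio_user0,path=/tmp/vhost.sock,mac=00:11:22:33:44:55
 *
 * If parsing succeeds the address is pushed to the device when supported,
 * otherwise the device-provided MAC, or a random one, is used.
 */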
static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
        uint32_t i, j;
        int callfd;
        int kickfd;

        for (i = 0; i < dev->max_queue_pairs * 2; i++) {
                /* Some backends use the kickfd and callfd as the criteria
                 * for judging whether the device is alive, so use real
                 * eventfds rather than placeholder values.
                 */
                callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
                if (callfd < 0) {
                        PMD_DRV_LOG(ERR, "(%s) callfd error, %s", dev->path, strerror(errno));
                        goto err;
                }
                kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
                if (kickfd < 0) {
                        close(callfd);
                        PMD_DRV_LOG(ERR, "(%s) kickfd error, %s", dev->path, strerror(errno));
                        goto err;
                }
                dev->callfds[i] = callfd;
                dev->kickfds[i] = kickfd;
        }

        return 0;
err:
        for (j = 0; j < i; j++) {
                if (dev->kickfds[j] >= 0) {
                        close(dev->kickfds[j]);
                        dev->kickfds[j] = -1;
                }
                if (dev->callfds[j] >= 0) {
                        close(dev->callfds[j]);
                        dev->callfds[j] = -1;
                }
        }

        return -1;
}
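/*
 * Eventfd roles: kickfds[i] is written by the frontend to notify the
 * backend of new available descriptors, while callfds[i] is written by
 * the backend to signal used descriptors (interrupts) back to the
 * frontend.
 */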
static void
virtio_user_dev_uninit_notify(struct virtio_user_dev *dev)
{
        uint32_t i;

        for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
                if (dev->kickfds[i] >= 0) {
                        close(dev->kickfds[i]);
                        dev->kickfds[i] = -1;
                }
                if (dev->callfds[i] >= 0) {
                        close(dev->callfds[i]);
                        dev->callfds[i] = -1;
                }
        }
}
static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
        uint32_t i;
        struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

        if (eth_dev->intr_handle == NULL) {
                eth_dev->intr_handle =
                        rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
                if (eth_dev->intr_handle == NULL) {
                        PMD_DRV_LOG(ERR, "(%s) failed to allocate intr_handle", dev->path);
                        return -1;
                }
        }

        for (i = 0; i < dev->max_queue_pairs; ++i) {
                /* Rx virtqueues occupy the even indices */
                if (rte_intr_efds_index_set(eth_dev->intr_handle, i,
                                dev->callfds[2 * i]))
                        return -rte_errno;
        }

        if (rte_intr_nb_efd_set(eth_dev->intr_handle, dev->max_queue_pairs))
                return -rte_errno;

        if (rte_intr_max_intr_set(eth_dev->intr_handle,
                        dev->max_queue_pairs + 1))
                return -rte_errno;

        if (rte_intr_type_set(eth_dev->intr_handle, RTE_INTR_HANDLE_VDEV))
                return -rte_errno;

        /* For virtio vdev, no need to read the counter for clean-up */
        if (rte_intr_efd_counter_size_set(eth_dev->intr_handle, 0))
                return -rte_errno;

        if (rte_intr_fd_set(eth_dev->intr_handle, dev->ops->get_intr_fd(dev)))
                return -rte_errno;

        return 0;
}
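/*
 * Worked example: with max_queue_pairs == 2, the Rx callfds (callfds[0]
 * and callfds[2]) become the per-queue event fds, nb_efd is set to 2 and
 * max_intr to 3; the extra slot presumably accounts for the backend's own
 * interrupt fd installed last via get_intr_fd().
 */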
static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
                         const void *addr,
                         size_t len __rte_unused,
                         void *arg)
{
        struct virtio_user_dev *dev = arg;
        struct rte_memseg_list *msl;
        uint16_t i;
        int ret = 0;

        /* ignore externally allocated memory */
        msl = rte_mem_virt2memseg_list(addr);
        if (msl->external)
                return;

        pthread_mutex_lock(&dev->mutex);

        if (dev->started == false)
                goto exit;

        /* Step 1: pause the active queues */
        for (i = 0; i < dev->queue_pairs; i++) {
                ret = dev->ops->enable_qp(dev, i, 0);
                if (ret < 0)
                        goto exit;
        }

        /* Step 2: update memory regions */
        ret = dev->ops->set_memory_table(dev);
        if (ret < 0)
                goto exit;

        /* Step 3: resume the active queues */
        for (i = 0; i < dev->queue_pairs; i++) {
                ret = dev->ops->enable_qp(dev, i, 1);
                if (ret < 0)
                        goto exit;
        }

exit:
        pthread_mutex_unlock(&dev->mutex);

        if (ret < 0)
                PMD_DRV_LOG(ERR, "(%s) Failed to update memory table", dev->path);
}
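/*
 * Rationale for the pause/update/resume sequence above: after a hotplug
 * event the backend may still translate guest addresses through the old
 * memory table, so the queues are disabled first to ensure no descriptor
 * is processed against stale regions.
 */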
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
        if (dev->is_server) {
                if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
                        PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
                        return -1;
                }
        }

        switch (dev->backend_type) {
        case VIRTIO_USER_BACKEND_VHOST_USER:
                dev->ops = &virtio_ops_user;
                break;
        case VIRTIO_USER_BACKEND_VHOST_KERNEL:
                dev->ops = &virtio_ops_kernel;
                break;
        case VIRTIO_USER_BACKEND_VHOST_VDPA:
                dev->ops = &virtio_ops_vdpa;
                break;
        default:
                PMD_DRV_LOG(ERR, "(%s) Unknown backend type", dev->path);
                return -1;
        }

        if (dev->ops->setup(dev) < 0) {
                PMD_INIT_LOG(ERR, "(%s) Failed to setup backend", dev->path);
                return -1;
        }

        if (virtio_user_dev_init_notify(dev) < 0) {
                PMD_INIT_LOG(ERR, "(%s) Failed to init notifiers", dev->path);
                goto destroy;
        }

        if (virtio_user_fill_intr_handle(dev) < 0) {
                PMD_INIT_LOG(ERR, "(%s) Failed to init interrupt handler", dev->path);
                goto uninit;
        }

        return 0;

uninit:
        virtio_user_dev_uninit_notify(dev);
destroy:
        dev->ops->destroy(dev);

        return -1;
}
/* Use the macro below to filter features from the vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES          \
        (1ULL << VIRTIO_NET_F_MAC             | \
         1ULL << VIRTIO_NET_F_STATUS          | \
         1ULL << VIRTIO_NET_F_MQ              | \
         1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR   | \
         1ULL << VIRTIO_NET_F_CTRL_VQ         | \
         1ULL << VIRTIO_NET_F_CTRL_RX         | \
         1ULL << VIRTIO_NET_F_CTRL_VLAN       | \
         1ULL << VIRTIO_NET_F_CSUM            | \
         1ULL << VIRTIO_NET_F_HOST_TSO4       | \
         1ULL << VIRTIO_NET_F_HOST_TSO6       | \
         1ULL << VIRTIO_NET_F_MRG_RXBUF       | \
         1ULL << VIRTIO_RING_F_INDIRECT_DESC  | \
         1ULL << VIRTIO_NET_F_GUEST_CSUM      | \
         1ULL << VIRTIO_NET_F_GUEST_TSO4      | \
         1ULL << VIRTIO_NET_F_GUEST_TSO6      | \
         1ULL << VIRTIO_F_IN_ORDER            | \
         1ULL << VIRTIO_F_VERSION_1           | \
         1ULL << VIRTIO_F_RING_PACKED)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
                     int cq, int queue_size, const char *mac, char **ifname,
                     int server, int mrg_rxbuf, int in_order, int packed_vq,
                     enum virtio_user_backend_type backend_type)
{
        uint64_t backend_features;
        int i;

        pthread_mutex_init(&dev->mutex, NULL);
        strlcpy(dev->path, path, PATH_MAX);

        for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; i++) {
                dev->kickfds[i] = -1;
                dev->callfds[i] = -1;
        }

        dev->started = 0;
        dev->max_queue_pairs = queues;
        dev->queue_pairs = 1; /* mq disabled by default */
        dev->queue_size = queue_size;
        dev->is_server = server;
        dev->mac_specified = 0;
        dev->frontend_features = 0;
        dev->unsupported_features = 0;
        dev->backend_type = backend_type;

        if (*ifname) {
                dev->ifname = *ifname;
                *ifname = NULL;
        }

        if (virtio_user_dev_setup(dev) < 0) {
                PMD_INIT_LOG(ERR, "(%s) backend setup failed", dev->path);
                return -1;
        }

        if (dev->ops->set_owner(dev) < 0) {
                PMD_INIT_LOG(ERR, "(%s) Failed to set backend owner", dev->path);
                return -1;
        }

        if (dev->ops->get_backend_features(&backend_features) < 0) {
                PMD_INIT_LOG(ERR, "(%s) Failed to get backend features", dev->path);
                return -1;
        }

        dev->unsupported_features = ~(VIRTIO_USER_SUPPORTED_FEATURES | backend_features);

        if (dev->ops->get_features(dev, &dev->device_features) < 0) {
                PMD_INIT_LOG(ERR, "(%s) Failed to get device features", dev->path);
                return -1;
        }

        virtio_user_dev_init_mac(dev, mac);

        if (!mrg_rxbuf)
                dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

        if (!in_order)
                dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

        if (!packed_vq)
                dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

        if (dev->mac_specified)
                dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
        else
                dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

        if (cq) {
                /* the device does not really need to know anything about CQ,
                 * so if necessary, we just claim to support CQ
                 */
                dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
        } else {
                dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
                /* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
                dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
                dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
                dev->unsupported_features |=
                        (1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
                dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
                dev->unsupported_features |=
                        (1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
        }

        /* The backend will not report this feature, we add it explicitly */
        if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
                dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

        dev->frontend_features &= ~dev->unsupported_features;
        dev->device_features &= ~dev->unsupported_features;

        if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
                                virtio_user_mem_event_cb, dev)) {
                if (rte_errno != ENOTSUP) {
                        PMD_INIT_LOG(ERR, "(%s) Failed to register mem event callback",
                                        dev->path);
                        return -1;
                }
        }

        return 0;
}
void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

        rte_intr_instance_free(eth_dev->intr_handle);
        eth_dev->intr_handle = NULL;

        virtio_user_stop_device(dev);

        rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

        virtio_user_dev_uninit_notify(dev);

        free(dev->ifname);

        dev->ops->destroy(dev);
}
static uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
        uint16_t i;
        uint8_t ret = 0;

        if (q_pairs > dev->max_queue_pairs) {
                PMD_INIT_LOG(ERR, "(%s) multi-q config %u, but only %u supported",
                             dev->path, q_pairs, dev->max_queue_pairs);
                return -1;
        }

        for (i = 0; i < q_pairs; ++i)
                ret |= dev->ops->enable_qp(dev, i, 1);
        for (i = q_pairs; i < dev->max_queue_pairs; ++i)
                ret |= dev->ops->enable_qp(dev, i, 0);

        dev->queue_pairs = q_pairs;

        return ret;
}
static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
                            uint16_t idx_hdr)
{
        struct virtio_net_ctrl_hdr *hdr;
        virtio_net_ctrl_ack status = ~0;
        uint16_t i, idx_data, idx_status;
        uint32_t n_descs = 0;

        /* locate desc for header, data, and status */
        idx_data = vring->desc[idx_hdr].next;
        n_descs++;

        i = idx_data;
        /* data descriptors are expected to carry only the NEXT flag */
        while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
                i = vring->desc[i].next;
                n_descs++;
        }

        /* locate desc for status */
        idx_status = i;
        n_descs++;

        hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
        if (hdr->class == VIRTIO_NET_CTRL_MQ &&
            hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
                uint16_t queues;

                queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
                status = virtio_user_handle_mq(dev, queues);
        } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
                   hdr->class == VIRTIO_NET_CTRL_MAC ||
                   hdr->class == VIRTIO_NET_CTRL_VLAN) {
                status = 0;
        }

        /* Update status */
        *(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

        return n_descs;
}
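/*
 * Split-ring control chain layout assumed by the parser above
 * (illustrative):
 *
 *        desc[idx_hdr]     -> struct virtio_net_ctrl_hdr (class/cmd)
 *        desc[idx_data...] -> command payload, e.g. a uint16_t queue count
 *        desc[idx_status]  -> virtio_net_ctrl_ack written back by this end
 */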
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
        uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

        return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
                wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}
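/*
 * Example: while used_wrap_counter == 1, a descriptor is available when
 * AVAIL is set and USED is clear; after the ring wraps the counter flips
 * to 0 and the same test expects AVAIL clear and USED set.
 */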
static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
                                   struct vring_packed *vring,
                                   uint16_t idx_hdr)
{
        struct virtio_net_ctrl_hdr *hdr;
        virtio_net_ctrl_ack status = ~0;
        uint16_t idx_data, idx_status;
        /* initialize to one, the header is the first descriptor */
        uint32_t n_descs = 1;

        /* locate desc for header, data, and status */
        idx_data = idx_hdr + 1;
        if (idx_data >= dev->queue_size)
                idx_data -= dev->queue_size;

        n_descs++;

        idx_status = idx_data;
        while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
                idx_status++;
                if (idx_status >= dev->queue_size)
                        idx_status -= dev->queue_size;
                n_descs++;
        }

        hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
        if (hdr->class == VIRTIO_NET_CTRL_MQ &&
            hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
                uint16_t queues;

                queues = *(uint16_t *)(uintptr_t)
                                vring->desc[idx_data].addr;
                status = virtio_user_handle_mq(dev, queues);
        } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
                   hdr->class == VIRTIO_NET_CTRL_MAC ||
                   hdr->class == VIRTIO_NET_CTRL_VLAN) {
                status = 0;
        }

        /* Update status */
        *(virtio_net_ctrl_ack *)(uintptr_t)
                vring->desc[idx_status].addr = status;

        /* Update used descriptor */
        vring->desc[idx_hdr].id = vring->desc[idx_status].id;
        vring->desc[idx_hdr].len = sizeof(status);

        return n_descs;
}
void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
        struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
        struct vring_packed *vring = &dev->packed_vrings[queue_idx];
        uint16_t n_descs, flags;

        /* Perform a load-acquire barrier in desc_is_avail to
         * enforce the ordering between desc flags and desc
         * content.
         */
        while (desc_is_avail(&vring->desc[vq->used_idx],
                             vq->used_wrap_counter)) {

                n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
                                vq->used_idx);

                flags = VRING_DESC_F_WRITE;
                if (vq->used_wrap_counter)
                        flags |= VRING_PACKED_DESC_F_AVAIL_USED;

                __atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
                                 __ATOMIC_RELEASE);

                vq->used_idx += n_descs;
                if (vq->used_idx >= dev->queue_size) {
                        vq->used_idx -= dev->queue_size;
                        vq->used_wrap_counter ^= 1;
                }
        }
}
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
        uint16_t avail_idx, desc_idx;
        struct vring_used_elem *uep;
        uint32_t n_descs;
        struct vring *vring = &dev->vrings[queue_idx];

        /* Consume the avail ring, using the used ring idx as the cursor */
        while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
               != vring->avail->idx) {
                avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
                            & (vring->num - 1);
                desc_idx = vring->avail->ring[avail_idx];

                n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

                /* Update used ring */
                uep = &vring->used->ring[avail_idx];
                uep->id = desc_idx;
                uep->len = n_descs;

                __atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
        }
}
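/*
 * Note: because control descriptors are consumed strictly in order here,
 * used->idx doubles as the consumer cursor into the avail ring; avail_idx
 * is derived from it with the power-of-two mask (vring->num - 1).
 */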
int
virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
{
        int ret;

        pthread_mutex_lock(&dev->mutex);
        dev->status = status;
        ret = dev->ops->set_status(dev, status);
        if (ret && ret != -ENOTSUP)
                PMD_INIT_LOG(ERR, "(%s) Failed to set backend status", dev->path);

        pthread_mutex_unlock(&dev->mutex);
        return ret;
}
int
virtio_user_dev_update_status(struct virtio_user_dev *dev)
{
        int ret;
        uint8_t status;

        pthread_mutex_lock(&dev->mutex);

        ret = dev->ops->get_status(dev, &status);
        if (!ret) {
                dev->status = status;
                PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
                        "\t-RESET: %u\n"
                        "\t-ACKNOWLEDGE: %u\n"
                        "\t-DRIVER: %u\n"
                        "\t-DRIVER_OK: %u\n"
                        "\t-FEATURES_OK: %u\n"
                        "\t-DEVICE_NEED_RESET: %u\n"
                        "\t-FAILED: %u",
                        dev->status,
                        (dev->status == VIRTIO_CONFIG_STATUS_RESET),
                        !!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
                        !!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
                        !!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
                        !!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
                        !!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
                        !!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
        } else if (ret != -ENOTSUP) {
                PMD_INIT_LOG(ERR, "(%s) Failed to get backend status", dev->path);
        }

        pthread_mutex_unlock(&dev->mutex);
        return ret;
}
int
virtio_user_dev_update_link_state(struct virtio_user_dev *dev)
{
        if (dev->ops->update_link_state)
                return dev->ops->update_link_state(dev);

        return 0;
}
static void
virtio_user_dev_reset_queues_packed(struct rte_eth_dev *eth_dev)
{
        struct virtio_user_dev *dev = eth_dev->data->dev_private;
        struct virtio_hw *hw = &dev->hw;
        struct virtnet_rx *rxvq;
        struct virtnet_tx *txvq;
        uint16_t i;

        /* Add lock to avoid queue contention. */
        rte_spinlock_lock(&hw->state_lock);
        hw->started = 0;

        /*
         * Wait for the datapath to complete before resetting queues.
         * 1 ms should be enough for the ongoing Tx/Rx functions to finish.
         */
        rte_delay_ms(1);

        /* Vring reset for each Tx queue and Rx queue. */
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                rxvq = eth_dev->data->rx_queues[i];
                virtqueue_rxvq_reset_packed(virtnet_rxq_to_vq(rxvq));
                virtio_dev_rx_queue_setup_finish(eth_dev, i);
        }

        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                txvq = eth_dev->data->tx_queues[i];
                virtqueue_txvq_reset_packed(virtnet_txq_to_vq(txvq));
        }

        hw->started = 1;
        rte_spinlock_unlock(&hw->state_lock);
}
static void
virtio_user_dev_delayed_disconnect_handler(void *param)
{
        struct virtio_user_dev *dev = param;
        struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

        if (rte_intr_disable(eth_dev->intr_handle) < 0) {
                PMD_DRV_LOG(ERR, "interrupt disable failed");
                return;
        }
        PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
                    rte_intr_fd_get(eth_dev->intr_handle));
        if (rte_intr_callback_unregister(eth_dev->intr_handle,
                                         virtio_interrupt_handler,
                                         eth_dev) != 1)
                PMD_DRV_LOG(ERR, "interrupt unregister failed");

        if (dev->is_server) {
                if (dev->ops->server_disconnect)
                        dev->ops->server_disconnect(dev);

                rte_intr_fd_set(eth_dev->intr_handle,
                        dev->ops->get_intr_fd(dev));

                PMD_DRV_LOG(DEBUG, "Registering intr fd: %d",
                            rte_intr_fd_get(eth_dev->intr_handle));

                if (rte_intr_callback_register(eth_dev->intr_handle,
                                               virtio_interrupt_handler,
                                               eth_dev))
                        PMD_DRV_LOG(ERR, "interrupt register failed");

                if (rte_intr_enable(eth_dev->intr_handle) < 0) {
                        PMD_DRV_LOG(ERR, "interrupt enable failed");
                        return;
                }
        }
}
static void
virtio_user_dev_delayed_intr_reconfig_handler(void *param)
{
        struct virtio_user_dev *dev = param;
        struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

        PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
                    rte_intr_fd_get(eth_dev->intr_handle));

        if (rte_intr_callback_unregister(eth_dev->intr_handle,
                                         virtio_interrupt_handler,
                                         eth_dev) != 1)
                PMD_DRV_LOG(ERR, "interrupt unregister failed");

        rte_intr_fd_set(eth_dev->intr_handle, dev->ops->get_intr_fd(dev));

        PMD_DRV_LOG(DEBUG, "Registering intr fd: %d",
                    rte_intr_fd_get(eth_dev->intr_handle));

        if (rte_intr_callback_register(eth_dev->intr_handle,
                                       virtio_interrupt_handler, eth_dev))
                PMD_DRV_LOG(ERR, "interrupt register failed");

        if (rte_intr_enable(eth_dev->intr_handle) < 0)
                PMD_DRV_LOG(ERR, "interrupt enable failed");
}
int
virtio_user_dev_server_reconnect(struct virtio_user_dev *dev)
{
        int ret, old_status;
        struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
        struct virtio_hw *hw = &dev->hw;

        if (!dev->ops->server_reconnect) {
                PMD_DRV_LOG(ERR, "(%s) Missing server reconnect callback", dev->path);
                return -1;
        }

        if (dev->ops->server_reconnect(dev)) {
                PMD_DRV_LOG(ERR, "(%s) Reconnect callback call failed", dev->path);
                return -1;
        }

        old_status = dev->status;

        virtio_reset(hw);

        virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

        virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);

        if (dev->ops->get_features(dev, &dev->device_features) < 0) {
                PMD_INIT_LOG(ERR, "get_features failed: %s",
                             strerror(errno));
                return -1;
        }

        /* unmask vhost-user unsupported features */
        dev->device_features &= ~(dev->unsupported_features);

        dev->features &= (dev->device_features | dev->frontend_features);

        /* For packed ring, resetting queues is required during reconnection. */
        if (virtio_with_packed_queue(hw) &&
            (old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
                PMD_INIT_LOG(NOTICE, "In-flight packets will be dropped"
                                " while the packed ring reconnects.");
                virtio_user_dev_reset_queues_packed(eth_dev);
        }

        virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);

        /* Start the device */
        virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
        if (!dev->started)
                return -1;

        if (dev->queue_pairs > 1) {
                ret = virtio_user_handle_mq(dev, dev->queue_pairs);
                if (ret != 0) {
                        PMD_INIT_LOG(ERR, "Failed to enable multi-queue pairs!");
                        return -1;
                }
        }
        if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
                if (rte_intr_disable(eth_dev->intr_handle) < 0) {
                        PMD_DRV_LOG(ERR, "interrupt disable failed");
                        return -1;
                }
                /*
                 * This function can be called from the interrupt handler, so
                 * we can't unregister the interrupt handler here. Set an
                 * alarm to do it later.
                 */
                rte_eal_alarm_set(1,
                        virtio_user_dev_delayed_intr_reconfig_handler,
                        (void *)dev);
        }
        PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeded!");
        return 0;
}