/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <pthread.h>
#include <inttypes.h>
#include <sys/eventfd.h>
#include <sys/types.h>

#include <rte_alarm.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"
#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

const char * const virtio_user_backend_strings[] = {
	[VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN",
	[VIRTIO_USER_BACKEND_VHOST_USER] = "VHOST_USER",
	[VIRTIO_USER_BACKEND_VHOST_KERNEL] = "VHOST_NET",
	[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
};
static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all per-virtqueue messages, make sure VHOST_SET_VRING_CALL comes
	 * first, because vhost depends on this message to allocate the
	 * virtqueue pair.
	 */
	struct vhost_vring_file file;
	int ret;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	ret = dev->ops->set_vring_call(dev, &file);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to create queue %u", dev->path, queue_sel);
		return -1;
	}

	return 0;
}
static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	int ret;
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings[queue_sel];
	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	ret = dev->ops->set_vring_num(dev, &state);
	if (ret < 0)
		goto err;

	state.index = queue_sel;
	state.num = 0; /* no reservation */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		state.num |= (1 << 15); /* bit 15: initial avail wrap counter */
	ret = dev->ops->set_vring_base(dev, &state);
	if (ret < 0)
		goto err;

	ret = dev->ops->set_vring_addr(dev, &addr);
	if (ret < 0)
		goto err;

	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
	 * comes last, because vhost depends on this message to judge if
	 * virtio is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	ret = dev->ops->set_vring_kick(dev, &file);
	if (ret < 0)
		goto err;

	return 0;
err:
	PMD_INIT_LOG(ERR, "(%s) Failed to kick queue %u", dev->path, queue_sel);
	return -1;
}
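/* Between them, virtio_user_create_queue() and virtio_user_kick_queue()
 * emit the per-virtqueue backend messages in the order the backends rely
 * on: SET_VRING_CALL first (it triggers virtqueue allocation in vhost),
 * then SET_VRING_NUM/SET_VRING_BASE/SET_VRING_ADDR, and SET_VRING_KICK
 * last (vhost uses it to decide the virtqueue is ready).
 */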
static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, queue_sel;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(ERR, "(%s) setup rx vq %u failed", dev->path, i);
			return -1;
		}
	}
	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(ERR, "(%s) setup tx vq %u failed", dev->path, i);
			return -1;
		}
	}

	return 0;
}
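/* Virtqueue index layout used above: queue pair i maps to virtqueue
 * 2*i + VTNET_SQ_RQ_QUEUE_IDX (0) for Rx and 2*i + VTNET_SQ_TQ_QUEUE_IDX (1)
 * for Tx (see ../virtio_ethdev.h); e.g. pair 2 uses virtqueues 4 and 5.
 */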
int
virtio_user_dev_set_features(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret = -1;

	pthread_mutex_lock(&dev->mutex);

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as the MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ, as the device does not need to know about it */
	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->set_features(dev, features);
	if (ret < 0)
		goto error;
	PMD_DRV_LOG(INFO, "(%s) set features: 0x%" PRIx64, dev->path, features);
error:
	pthread_mutex_unlock(&dev->mutex);

	return ret;
}
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be taken in the
	 * correct order to avoid deadlocks: before releasing this lock,
	 * this thread should not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be replaced when we
	 * get proper support from the memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);

	/* Step 2: share memory regions */
	ret = dev->ops->set_memory_table(dev);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	ret = virtio_user_queue_setup(dev, virtio_user_kick_queue);
	if (ret < 0)
		goto error;

	/* Step 4: enable queues
	 * we enable the 1st queue pair by default.
	 */
	ret = dev->ops->enable_qp(dev, 0, 1);
	if (ret < 0)
		goto error;

	dev->started = true;

	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	PMD_INIT_LOG(ERR, "(%s) Failed to start device", dev->path);

	/* TODO: free resource here or caller to check */
	return -1;
}
int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int ret;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		ret = dev->ops->enable_qp(dev, i, 0);
		if (ret < 0)
			goto err;
	}

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		ret = dev->ops->get_vring_base(dev, &state);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "(%s) get_vring_base failed, index=%u", dev->path, i);
			goto err;
		}
	}

	dev->started = false;
out:
	pthread_mutex_unlock(&dev->mutex);
	return 0;
err:
	pthread_mutex_unlock(&dev->mutex);
	PMD_INIT_LOG(ERR, "(%s) Failed to stop device", dev->path);
	return -1;
}
static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr tmp;

	if (!mac)
		return;

	if (rte_ether_unformat_addr(mac, &tmp) == 0) {
		memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;
	} else {
		/* ignore the wrong mac, use random mac */
		PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
	}
}
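/* Usage sketch (hypothetical address): parse_mac(dev, "00:11:22:33:44:55")
 * copies the parsed address and sets dev->mac_specified; a malformed
 * string is only logged, leaving the randomly generated MAC in place.
 */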
static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j;
	int callfd;
	int kickfd;

	for (i = 0; i < dev->max_queue_pairs * 2; i++) {
		/* May use invalid flag, but some backend uses kickfd and
		 * callfd as criteria to judge if dev is alive. so finally we
		 * use real event_fd.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "(%s) callfd error, %s", dev->path, strerror(errno));
			goto err;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			close(callfd);
			PMD_DRV_LOG(ERR, "(%s) kickfd error, %s", dev->path, strerror(errno));
			goto err;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	return 0;
err:
	for (j = 0; j < i; j++) {
		if (dev->kickfds[j] >= 0) {
			close(dev->kickfds[j]);
			dev->kickfds[j] = -1;
		}
		if (dev->callfds[j] >= 0) {
			close(dev->callfds[j]);
			dev->callfds[j] = -1;
		}
	}
	return -1;
}
static void
virtio_user_dev_uninit_notify(struct virtio_user_dev *dev)
{
	uint32_t i;

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		if (dev->kickfds[i] >= 0) {
			close(dev->kickfds[i]);
			dev->kickfds[i] = -1;
		}
		if (dev->callfds[i] >= 0) {
			close(dev->callfds[i]);
			dev->callfds[i] = -1;
		}
	}
}
static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	if (!eth_dev->intr_handle) {
		eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
		if (!eth_dev->intr_handle) {
			PMD_DRV_LOG(ERR, "(%s) failed to allocate intr_handle", dev->path);
			return -1;
		}
		memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
	}

	for (i = 0; i < dev->max_queue_pairs; ++i)
		eth_dev->intr_handle->efds[i] = dev->callfds[i];
	eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
	eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
	eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
	/* For virtio vdev, no need to read counter for clean */
	eth_dev->intr_handle->efd_counter_size = 0;
	eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);

	return 0;
}
static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;
	int ret = 0;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (dev->started == false)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++) {
		ret = dev->ops->enable_qp(dev, i, 0);
		if (ret < 0)
			goto exit;
	}

	/* Step 2: update memory regions */
	ret = dev->ops->set_memory_table(dev);
	if (ret < 0)
		goto exit;

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++) {
		ret = dev->ops->enable_qp(dev, i, 1);
		if (ret < 0)
			goto exit;
	}

exit:
	pthread_mutex_unlock(&dev->mutex);

	if (ret < 0)
		PMD_DRV_LOG(ERR, "(%s) Failed to update memory table", dev->path);
}
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	if (dev->is_server) {
		if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
			PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
			return -1;
		}
	}

	switch (dev->backend_type) {
	case VIRTIO_USER_BACKEND_VHOST_USER:
		dev->ops = &virtio_ops_user;
		break;
	case VIRTIO_USER_BACKEND_VHOST_KERNEL:
		dev->ops = &virtio_ops_kernel;
		break;
	case VIRTIO_USER_BACKEND_VHOST_VDPA:
		dev->ops = &virtio_ops_vdpa;
		break;
	default:
		PMD_DRV_LOG(ERR, "(%s) Unknown backend type", dev->path);
		return -1;
	}

	if (dev->ops->setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to setup backend", dev->path);
		return -1;
	}

	if (virtio_user_dev_init_notify(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to init notifiers", dev->path);
		goto destroy;
	}

	if (virtio_user_fill_intr_handle(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to init interrupt handler", dev->path);
		goto uninit;
	}

	return 0;
uninit:
	virtio_user_dev_uninit_notify(dev);
destroy:
	dev->ops->destroy(dev);
	return -1;
}
/* Use this macro to filter features advertised by the vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES		\
	(1ULL << VIRTIO_NET_F_MAC |		\
	 1ULL << VIRTIO_NET_F_STATUS |		\
	 1ULL << VIRTIO_NET_F_MQ |		\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR |	\
	 1ULL << VIRTIO_NET_F_CTRL_VQ |		\
	 1ULL << VIRTIO_NET_F_CTRL_RX |		\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN |	\
	 1ULL << VIRTIO_NET_F_CSUM |		\
	 1ULL << VIRTIO_NET_F_HOST_TSO4 |	\
	 1ULL << VIRTIO_NET_F_HOST_TSO6 |	\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF |	\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC |	\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM |	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4 |	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6 |	\
	 1ULL << VIRTIO_F_IN_ORDER |		\
	 1ULL << VIRTIO_F_VERSION_1 |		\
	 1ULL << VIRTIO_F_RING_PACKED)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq,
		     enum virtio_user_backend_type backend_type)
{
	uint64_t backend_features;
	int i;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, PATH_MAX);

	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; i++) {
		dev->kickfds[i] = -1;
		dev->callfds[i] = -1;
	}

	dev->max_queue_pairs = queues;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = 0;
	dev->backend_type = backend_type;

	parse_mac(dev, mac);

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) backend set up fails", dev->path);
		return -1;
	}

	if (dev->ops->set_owner(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to set backend owner", dev->path);
		return -1;
	}

	if (dev->ops->get_backend_features(&backend_features) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get backend features", dev->path);
		return -1;
	}

	dev->unsupported_features = ~(VIRTIO_USER_SUPPORTED_FEATURES | backend_features);

	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get device features", dev->path);
		return -1;
	}

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* the device does not really need to know anything about the
		 * control queue, so if necessary we just claim to support it
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature, we add it explicitly */
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	/*
	 * Device features =
	 *     (frontend_features | backend_features) & ~unsupported_features,
	 * where backend_features are those just fetched via get_features().
	 */
	dev->device_features |= dev->frontend_features;
	dev->device_features &= ~dev->unsupported_features;

	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
					    virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR, "(%s) Failed to register mem event callback",
				     dev->path);
			return -1;
		}
	}

	return 0;
}
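/* Worked example of the feature math above, with illustrative (not real)
 * values: suppose get_features() returned CSUM|MRG_RXBUF, the frontend
 * claims CTRL_VQ, and mrg_rxbuf=0 put MRG_RXBUF into unsupported_features.
 * Then (CSUM|MRG_RXBUF | CTRL_VQ) & ~MRG_RXBUF = CSUM|CTRL_VQ: a feature
 * is negotiated only if it is offered or claimed, and not disabled.
 */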
void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	virtio_user_dev_uninit_notify(dev);

	dev->ops->destroy(dev);
}
uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "(%s) multi-q config %u, but only %u supported",
			     dev->path, q_pairs, dev->max_queue_pairs);
		return -1;
	}

	for (i = 0; i < q_pairs; ++i)
		ret |= dev->ops->enable_qp(dev, i, 1);
	for (i = q_pairs; i < dev->max_queue_pairs; ++i)
		ret |= dev->ops->enable_qp(dev, i, 0);

	dev->queue_pairs = q_pairs;

	return ret;
}
static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}
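/* A control message is a descriptor chain with three parts: the
 * virtio_net_ctrl_hdr, a command-specific payload (for
 * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, a uint16_t queue-pair count), and a
 * trailing one-byte ack. The walk above locates the ack as the last
 * descriptor, i.e. the first one without VRING_DESC_F_NEXT set.
 */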
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
	       wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}
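/* Packed-ring rule (virtio 1.1): a descriptor is available when its AVAIL
 * flag equals the ring's wrap counter while its USED flag differs. With
 * wrap_counter == 1, AVAIL=1/USED=0 means available and AVAIL=1/USED=1
 * means already used; the counter flips each time the ring wraps.
 */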
static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)
		vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}
void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {

		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
				vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;

		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}
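/* The release store of the flags above pairs with the load-acquire in
 * desc_is_avail(): the driver must not observe the AVAIL/USED bits flip
 * before the updated id/len of the used descriptor become visible.
 */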
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume avail ring, using used ring idx as first one */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}
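/* Since control messages are handled synchronously and in order, used->idx
 * always equals the next avail-ring slot to consume, which is why it can
 * double as the consume cursor in the loop above.
 */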
int
virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;

	pthread_mutex_lock(&dev->mutex);
	dev->status = status;
	ret = dev->ops->set_status(dev, status);
	if (ret && ret != -ENOTSUP)
		PMD_INIT_LOG(ERR, "(%s) Failed to set backend status", dev->path);

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}
int
virtio_user_dev_update_status(struct virtio_user_dev *dev)
{
	int ret;
	uint8_t status;

	pthread_mutex_lock(&dev->mutex);

	ret = dev->ops->get_status(dev, &status);
	if (!ret) {
		dev->status = status;
		PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
			"\t-RESET: %u\n"
			"\t-ACKNOWLEDGE: %u\n"
			"\t-DRIVER: %u\n"
			"\t-DRIVER_OK: %u\n"
			"\t-FEATURES_OK: %u\n"
			"\t-DEVICE_NEED_RESET: %u\n"
			"\t-FAILED: %u",
			dev->status,
			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
	} else if (ret != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get backend status", dev->path);
	}

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}
int
virtio_user_dev_update_link_state(struct virtio_user_dev *dev)
{
	if (dev->ops->update_link_state)
		return dev->ops->update_link_state(dev);

	return 0;
}
static void
virtio_user_dev_reset_queues_packed(struct rte_eth_dev *eth_dev)
{
	struct virtio_user_dev *dev = eth_dev->data->dev_private;
	struct virtio_hw *hw = &dev->hw;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq;
	uint16_t i;

	/* Add lock to avoid queue contention. */
	rte_spinlock_lock(&hw->state_lock);
	hw->started = 0;

	/*
	 * Waiting for datapath to complete before resetting queues.
	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
	 */
	rte_delay_ms(1);

	/* Vring reset for each Tx queue and Rx queue. */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxvq = eth_dev->data->rx_queues[i];
		virtqueue_rxvq_reset_packed(virtnet_rxq_to_vq(rxvq));
		virtio_dev_rx_queue_setup_finish(eth_dev, i);
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		txvq = eth_dev->data->tx_queues[i];
		virtqueue_txvq_reset_packed(virtnet_txq_to_vq(txvq));
	}

	hw->started = 1;
	rte_spinlock_unlock(&hw->state_lock);
}
static void
virtio_user_dev_delayed_disconnect_handler(void *param)
{
	struct virtio_user_dev *dev = param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	if (rte_intr_disable(eth_dev->intr_handle) < 0) {
		PMD_DRV_LOG(ERR, "interrupt disable failed");
		return;
	}
	PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
		    eth_dev->intr_handle->fd);
	if (rte_intr_callback_unregister(eth_dev->intr_handle,
					 virtio_interrupt_handler,
					 eth_dev) != 1)
		PMD_DRV_LOG(ERR, "interrupt unregister failed");

	if (dev->is_server) {
		if (dev->ops->server_disconnect)
			dev->ops->server_disconnect(dev);

		eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);

		PMD_DRV_LOG(DEBUG, "Registering intr fd: %d",
			    eth_dev->intr_handle->fd);

		if (rte_intr_callback_register(eth_dev->intr_handle,
					       virtio_interrupt_handler,
					       eth_dev))
			PMD_DRV_LOG(ERR, "interrupt register failed");

		if (rte_intr_enable(eth_dev->intr_handle) < 0)
			PMD_DRV_LOG(ERR, "interrupt enable failed");
	}
}
static void
virtio_user_dev_delayed_intr_reconfig_handler(void *param)
{
	struct virtio_user_dev *dev = param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
		    eth_dev->intr_handle->fd);

	if (rte_intr_callback_unregister(eth_dev->intr_handle,
					 virtio_interrupt_handler,
					 eth_dev) != 1)
		PMD_DRV_LOG(ERR, "interrupt unregister failed");

	eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);

	PMD_DRV_LOG(DEBUG, "Registering intr fd: %d", eth_dev->intr_handle->fd);

	if (rte_intr_callback_register(eth_dev->intr_handle,
				       virtio_interrupt_handler, eth_dev))
		PMD_DRV_LOG(ERR, "interrupt register failed");

	if (rte_intr_enable(eth_dev->intr_handle) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");
}
int
virtio_user_dev_server_reconnect(struct virtio_user_dev *dev)
{
	int ret, old_status;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
	struct virtio_hw *hw = &dev->hw;

	if (!dev->ops->server_reconnect) {
		PMD_DRV_LOG(ERR, "(%s) Missing server reconnect callback", dev->path);
		return -1;
	}

	if (dev->ops->server_reconnect(dev)) {
		PMD_DRV_LOG(ERR, "(%s) Reconnect callback call failed", dev->path);
		return -1;
	}

	old_status = dev->status;

	virtio_reset(hw);

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);

	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "get_features failed: %s",
			     strerror(errno));
		return -1;
	}

	dev->device_features |= dev->frontend_features;

	/* unmask vhost-user unsupported features */
	dev->device_features &= ~(dev->unsupported_features);

	dev->features &= dev->device_features;

	/* For packed ring, resetting queues is required in reconnection. */
	if (virtio_with_packed_queue(hw) &&
	    (old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
		PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
			     " when packed ring reconnecting.");
		virtio_user_dev_reset_queues_packed(eth_dev);
	}

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);

	/* Start the device */
	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);

	if (dev->queue_pairs > 1) {
		ret = virtio_user_handle_mq(dev, dev->queue_pairs);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Failed to enable multi-queue pairs!");
			return -1;
		}
	}
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		if (rte_intr_disable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt disable failed");
			return -1;
		}
		/*
		 * This function can be called from the interrupt handler, so
		 * we can't unregister the interrupt handler here. Set an
		 * alarm to do it later.
		 */
		rte_eal_alarm_set(1,
			virtio_user_dev_delayed_intr_reconfig_handler,
			(void *)dev);
	}
	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
	return 0;
}