/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/eventfd.h>
#include <sys/types.h>

#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_ether.h>
#include <rte_memory.h>
#include <rte_cycles.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

const char * const virtio_user_backend_strings[] = {
	[VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN",
	[VIRTIO_USER_BACKEND_VHOST_USER] = "VHOST_USER",
	[VIRTIO_USER_BACKEND_VHOST_KERNEL] = "VHOST_NET",
	[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
};

static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all per-virtqueue messages, make sure VHOST_SET_VRING_CALL comes
	 * first, because vhost depends on this message to allocate the
	 * virtqueue pair.
	 */
	struct vhost_vring_file file;
	int ret;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	ret = dev->ops->set_vring_call(dev, &file);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to create queue %u", dev->path, queue_sel);
		return -1;
	}

	return 0;
}

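/* Per-virtqueue vhost messages are sent in two phases: set_vring_call at
 * queue-creation time (above), then num/base/addr and finally the kick fd
 * when the queue is started (below).
 */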
static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	int ret;
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings[queue_sel];
	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	ret = dev->ops->set_vring_num(dev, &state);
	if (ret < 0)
		goto err;

	state.index = queue_sel;
	state.num = 0; /* no reservation */
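	/* For a packed ring, vhost encodes the next avail index in bits 0..14
	 * of the vring base and the avail wrap counter in bit 15, hence the
	 * (1 << 15) below: index 0 with an initial wrap counter of 1.
	 */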
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		state.num |= (1 << 15);
	ret = dev->ops->set_vring_base(dev, &state);
	if (ret < 0)
		goto err;

	ret = dev->ops->set_vring_addr(dev, &addr);
	if (ret < 0)
		goto err;

	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
	 * comes last, because vhost depends on this message to judge if the
	 * device is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	ret = dev->ops->set_vring_kick(dev, &file);
	if (ret < 0)
		goto err;

	return 0;
err:
	PMD_INIT_LOG(ERR, "(%s) Failed to kick queue %u", dev->path, queue_sel);

	return -1;
}

static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, queue_sel;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(ERR, "(%s) setup rx vq %u failed", dev->path, i);
			return -1;
		}
	}
	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(ERR, "(%s) setup tx vq %u failed", dev->path, i);
			return -1;
		}
	}

	return 0;
}

int
virtio_user_dev_set_features(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret = -1;

	pthread_mutex_lock(&dev->mutex);

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ, as the backend does not really need to know */
	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->set_features(dev, features);
	if (ret < 0)
		goto error;

	PMD_DRV_LOG(INFO, "(%s) set features: 0x%" PRIx64, dev->path, features);
error:
	pthread_mutex_unlock(&dev->mutex);

	return ret;
}

int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);
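	/* Steps 0 and 1 (queue creation and feature negotiation) were already
	 * performed by virtio_user_dev_set_features(), which is why starting
	 * the device begins at Step 2 here.
	 */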
	/* Step 2: share memory regions */
	ret = dev->ops->set_memory_table(dev);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	ret = virtio_user_queue_setup(dev, virtio_user_kick_queue);
	if (ret < 0)
		goto error;

	/* Step 4: enable queues
	 * we enable the 1st queue pair by default.
	 */
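	/* Additional queue pairs are enabled later, when the control queue
	 * handler processes a VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET command
	 * (see virtio_user_handle_mq()).
	 */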
	ret = dev->ops->enable_qp(dev, 0, 1);
	if (ret < 0)
		goto error;

	dev->started = true;

	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	PMD_INIT_LOG(ERR, "(%s) Failed to start device", dev->path);

	/* TODO: free resource here or caller to check */
	return -1;
}

int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint16_t i;
	int ret;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		ret = dev->ops->enable_qp(dev, i, 0);
		if (ret < 0)
			goto err;
	}

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		ret = dev->ops->get_vring_base(dev, &state);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "(%s) get_vring_base failed, index=%u", dev->path, i);
			goto err;
		}
	}

	dev->started = false;
out:
	pthread_mutex_unlock(&dev->mutex);

	return 0;
err:
	pthread_mutex_unlock(&dev->mutex);

	PMD_INIT_LOG(ERR, "(%s) Failed to stop device", dev->path);

	return -1;
}

static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr tmp;

	if (!mac)
		return;

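	/* rte_ether_unformat_addr() accepts the canonical colon-separated
	 * form, e.g. "00:11:22:33:44:55"; anything it rejects is ignored
	 * and the device keeps its random MAC.
	 */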
	if (rte_ether_unformat_addr(mac, &tmp) == 0) {
		memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;
	} else {
		/* ignore the wrong mac, use random mac */
		PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
	}
}

static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j;
	int callfd;
	int kickfd;

	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
		if (i >= dev->max_queue_pairs * 2) {
			dev->kickfds[i] = -1;
			dev->callfds[i] = -1;
			continue;
		}

		/* We could use invalid fds here, but some backends use the
		 * kickfd and callfd as criteria to judge whether the device
		 * is alive, so in the end we use real eventfds.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "(%s) callfd error, %s", dev->path, strerror(errno));
			break;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			close(callfd);
			PMD_DRV_LOG(ERR, "(%s) kickfd error, %s", dev->path, strerror(errno));
			break;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	if (i < VIRTIO_MAX_VIRTQUEUES) {
		for (j = 0; j < i; ++j) {
			close(dev->callfds[j]);
			close(dev->kickfds[j]);
		}

		return -1;
	}

	return 0;
}

static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];

	if (!eth_dev->intr_handle) {
		eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
		if (!eth_dev->intr_handle) {
			PMD_DRV_LOG(ERR, "(%s) failed to allocate intr_handle", dev->path);
			return -1;
		}
		memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
	}

	for (i = 0; i < dev->max_queue_pairs; ++i)
		eth_dev->intr_handle->efds[i] = dev->callfds[i];
	eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
	eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
	eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
	/* For virtio vdev, no need to read counter for clean */
	eth_dev->intr_handle->efd_counter_size = 0;
	eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);

	return 0;
}

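/* Memory hotplug callback, registered in virtio_user_dev_init() under
 * VIRTIO_USER_MEM_EVENT_CLB_NAME. Whenever the EAL maps or unmaps hugepage
 * memory, the backend's memory table must be refreshed, with the active
 * queues quiesced around the update.
 */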
static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;
	int ret = 0;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (dev->started == false)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++) {
		ret = dev->ops->enable_qp(dev, i, 0);
		if (ret < 0)
			goto exit;
	}

	/* Step 2: update memory regions */
	ret = dev->ops->set_memory_table(dev);
	if (ret < 0)
		goto exit;

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++) {
		ret = dev->ops->enable_qp(dev, i, 1);
		if (ret < 0)
			goto exit;
	}

exit:
	pthread_mutex_unlock(&dev->mutex);

	if (ret < 0)
		PMD_DRV_LOG(ERR, "(%s) Failed to update memory table", dev->path);
}

static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	uint32_t q;

	dev->vhostfds = NULL;
	dev->tapfds = NULL;

	if (dev->is_server) {
		if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
			PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
			return -1;
		}
	}

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
		dev->ops = &virtio_ops_user;
	} else if (dev->backend_type ==
			VIRTIO_USER_BACKEND_VHOST_KERNEL) {
		dev->ops = &virtio_ops_kernel;

		dev->vhostfds = malloc(dev->max_queue_pairs *
				sizeof(int));
		dev->tapfds = malloc(dev->max_queue_pairs *
				sizeof(int));
		if (!dev->vhostfds || !dev->tapfds) {
			PMD_INIT_LOG(ERR, "(%s) Failed to allocate FDs", dev->path);
			return -1;
		}

		for (q = 0; q < dev->max_queue_pairs; ++q) {
			dev->vhostfds[q] = -1;
			dev->tapfds[q] = -1;
		}
	} else if (dev->backend_type ==
			VIRTIO_USER_BACKEND_VHOST_VDPA) {
		dev->ops = &virtio_ops_vdpa;
	} else {
		PMD_DRV_LOG(ERR, "(%s) Unknown backend type", dev->path);
		return -1;
	}

	if (dev->ops->setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to setup backend", dev->path);
		return -1;
	}

	if (virtio_user_dev_init_notify(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to init notifiers", dev->path);
		return -1;
	}

	if (virtio_user_fill_intr_handle(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to init interrupt handler", dev->path);
		return -1;
	}

	return 0;
}

/* Use the macro below to filter features advertised by the vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES			\
	(1ULL << VIRTIO_NET_F_MAC		|	\
	 1ULL << VIRTIO_NET_F_STATUS		|	\
	 1ULL << VIRTIO_NET_F_MQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR	|	\
	 1ULL << VIRTIO_NET_F_CTRL_VQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_RX		|	\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN		|	\
	 1ULL << VIRTIO_NET_F_CSUM		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO4		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO6		|	\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF		|	\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC	|	\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
	 1ULL << VIRTIO_F_IN_ORDER		|	\
	 1ULL << VIRTIO_F_VERSION_1		|	\
	 1ULL << VIRTIO_F_RING_PACKED)
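/* Feature bits outside this mask that the backend does not itself advertise
 * end up in dev->unsupported_features in virtio_user_dev_init() and are
 * cleared from the negotiated feature set.
 */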
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq,
		     enum virtio_user_backend_type backend_type)
{
	uint64_t backend_features;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, PATH_MAX);

	dev->max_queue_pairs = queues;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = 0;
	dev->backend_type = backend_type;

	parse_mac(dev, mac);

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) backend setup failed", dev->path);
		return -1;
	}

	if (dev->ops->set_owner(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to set backend owner", dev->path);
		return -1;
	}

	if (dev->ops->get_backend_features(&backend_features) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get backend features", dev->path);
		return -1;
	}

	dev->unsupported_features = ~(VIRTIO_USER_SUPPORTED_FEATURES | backend_features);

	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get device features", dev->path);
		return -1;
	}

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* The device does not really need to know anything about the
		 * CQ, so if necessary we just claim to support the CQ.
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature, we add it explicitly */
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	/*
	 * Device features =
	 *     (frontend_features | backend_features) & ~unsupported_features;
	 */
	dev->device_features |= dev->frontend_features;
	dev->device_features &= ~dev->unsupported_features;
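	/* Example: with cq == 0, VIRTIO_NET_F_CTRL_VQ was added to
	 * unsupported_features above, so the bit is cleared here even if the
	 * backend offered it.
	 */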
	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
				virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR, "(%s) Failed to register mem event callback",
					dev->path);
			return -1;
		}
	}

	return 0;
}

void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	uint32_t i;

	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		close(dev->callfds[i]);
		close(dev->kickfds[i]);
	}

	if (dev->vhostfds) {
		for (i = 0; i < dev->max_queue_pairs; ++i) {
			close(dev->vhostfds[i]);
			if (dev->tapfds[i] >= 0)
				close(dev->tapfds[i]);
		}

		free(dev->vhostfds);
		free(dev->tapfds);
	}

	free(dev->ifname);

	if (dev->is_server)
		unlink(dev->path);

	dev->ops->destroy(dev);
}

uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "(%s) multi-q config %u, but only %u supported",
			     dev->path, q_pairs, dev->max_queue_pairs);
		return -1;
	}

	for (i = 0; i < q_pairs; ++i)
		ret |= dev->ops->enable_qp(dev, i, 1);
	for (i = q_pairs; i < dev->max_queue_pairs; ++i)
		ret |= dev->ops->enable_qp(dev, i, 0);

	dev->queue_pairs = q_pairs;

	return ret;
}

static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;

	/* locate desc for header, data, and status */
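	/* A control command is a descriptor chain of at least three parts:
	 * a read-only header (class/cmd), one or more read-only data
	 * descriptors linked via VRING_DESC_F_NEXT, and a final write-only
	 * one-byte ack status.
	 */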
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}

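/* In a packed ring each descriptor carries AVAIL and USED flag bits instead
 * of separate avail/used rings. A descriptor is available to the device when
 * AVAIL matches the driver's current wrap counter while USED does not.
 * Example: with wrap_counter == 1, flags AVAIL=1/USED=0 read as "available";
 * once the device writes the descriptor back with AVAIL=1/USED=1 it reads
 * as used.
 */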
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
	       wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}

static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	*(virtio_net_ctrl_ack *)(uintptr_t)
			vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}

void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {

		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
				vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;
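		/* The release store below pairs with the acquire load in
		 * desc_is_avail(): the driver cannot observe the new flags
		 * before the id/len fields written by the handler above.
		 */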
		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}

void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume avail ring, using used ring idx as first one */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}

int
virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;

	pthread_mutex_lock(&dev->mutex);
	dev->status = status;
	ret = dev->ops->set_status(dev, status);
	if (ret && ret != -ENOTSUP)
		PMD_INIT_LOG(ERR, "(%s) Failed to set backend status", dev->path);

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

int
virtio_user_dev_update_status(struct virtio_user_dev *dev)
{
	int ret;
	uint8_t status;

	pthread_mutex_lock(&dev->mutex);

	ret = dev->ops->get_status(dev, &status);
	if (!ret) {
		dev->status = status;
		PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
			"\t-RESET: %u\n"
			"\t-ACKNOWLEDGE: %u\n"
			"\t-DRIVER: %u\n"
			"\t-DRIVER_OK: %u\n"
			"\t-FEATURES_OK: %u\n"
			"\t-DEVICE_NEED_RESET: %u\n"
			"\t-FAILED: %u",
			dev->status,
			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
	} else if (ret != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get backend status", dev->path);
	}

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

int
virtio_user_dev_update_link_state(struct virtio_user_dev *dev)
{
	if (dev->ops->update_link_state)
		return dev->ops->update_link_state(dev);

	return 0;
}

void
virtio_user_dev_reset_queues_packed(struct rte_eth_dev *eth_dev)
{
	struct virtio_user_dev *dev = eth_dev->data->dev_private;
	struct virtio_hw *hw = &dev->hw;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq;
	uint16_t i;

	/* Add lock to avoid queue contention. */
	rte_spinlock_lock(&hw->state_lock);
	hw->started = 0;

	/*
	 * Waiting for datapath to complete before resetting queues.
	 * 1 ms should be enough for the ongoing Tx/Rx functions to finish.
	 */
	rte_delay_ms(1);

	/* Vring reset for each Tx queue and Rx queue. */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxvq = eth_dev->data->rx_queues[i];
		virtqueue_rxvq_reset_packed(rxvq->vq);
		virtio_dev_rx_queue_setup_finish(eth_dev, i);
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		txvq = eth_dev->data->tx_queues[i];
		virtqueue_txvq_reset_packed(txvq->vq);
	}

	hw->started = 1;
	rte_spinlock_unlock(&hw->state_lock);
}

void
virtio_user_dev_delayed_handler(void *param)
{
	struct virtio_user_dev *dev = param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];

	if (rte_intr_disable(eth_dev->intr_handle) < 0) {
		PMD_DRV_LOG(ERR, "interrupt disable failed");
		return;
	}
	rte_intr_callback_unregister(eth_dev->intr_handle,
				     virtio_interrupt_handler, eth_dev);
	if (dev->is_server) {
		if (dev->ops->server_disconnect)
			dev->ops->server_disconnect(dev);
		eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);
		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return;
		}
	}
}

int
virtio_user_dev_server_reconnect(struct virtio_user_dev *dev)
{
	int ret, old_status;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
	struct virtio_hw *hw = &dev->hw;

	if (!dev->ops->server_reconnect) {
		PMD_DRV_LOG(ERR, "(%s) Missing server reconnect callback", dev->path);
		return -1;
	}

	if (dev->ops->server_reconnect(dev)) {
		PMD_DRV_LOG(ERR, "(%s) Reconnect callback call failed", dev->path);
		return -1;
	}

	old_status = dev->status;

	virtio_reset(hw);

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);

	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "get_features failed: %s",
			     strerror(errno));
		return -1;
	}

	dev->device_features |= dev->frontend_features;

	/* unmask vhost-user unsupported features */
	dev->device_features &= ~(dev->unsupported_features);

	dev->features &= dev->device_features;

	/* For packed ring, resetting queues is required in reconnection. */
	if (virtio_with_packed_queue(hw) &&
	    (old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
		PMD_INIT_LOG(NOTICE, "In-flight packets will be dropped"
				" when reconnecting with a packed ring.");
		virtio_user_dev_reset_queues_packed(eth_dev);
	}

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);

	/* Start the device */
	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
	if (!dev->started)
		return -1;

	if (dev->queue_pairs > 1) {
		ret = virtio_user_handle_mq(dev, dev->queue_pairs);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Failed to enable multi-queue pairs!");
			return -1;
		}
	}
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		if (rte_intr_disable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt disable failed");
			return -1;
		}
		rte_intr_callback_unregister(eth_dev->intr_handle,
					     virtio_interrupt_handler,
					     eth_dev);

		eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);

		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -1;
		}
	}
	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
	return 0;
}