/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"
static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all the per-virtqueue messages, make sure VHOST_USER_SET_VRING_CALL
	 * comes first, because vhost depends on this message to allocate the
	 * virtqueue pair.
	 */
	struct vhost_vring_file file;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);

	return 0;
}
static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings[queue_sel];
	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);

	state.index = queue_sel;
	state.num = 0; /* no reservation */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		state.num |= (1 << 15);
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);

	dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);

	/* Of all the per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
	 * comes last, because vhost depends on this message to judge if
	 * virtio is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);

	return 0;
}
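/* A note on the (1 << 15) above: for packed rings, the vhost-user protocol
 * folds the initial ring state into state.num of VHOST_USER_SET_VRING_BASE:
 * bits 0..14 carry the next avail index and bit 15 the avail wrap counter,
 * which starts at 1 per the VIRTIO 1.1 spec. For split rings, state.num is
 * simply the avail index to start from.
 */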
static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, queue_sel;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup rx vq fails: %u", i);
			return -1;
		}
	}
	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup tx vq fails: %u", i);
			return -1;
		}
	}

	return 0;
}
int
is_vhost_user_by_type(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) == -1)
		return 0;

	return S_ISSOCK(sb.st_mode);
}
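/* The heuristic above relies on the backend type being encoded in the path:
 * a vhost-user backend is reached through a UNIX domain socket, while the
 * vhost-kernel backend is a character device (e.g. /dev/vhost-net), so
 * S_ISSOCK() is sufficient to tell them apart.
 */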
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);

	if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
		goto error;

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	/* Step 1: negotiate protocol features & set features */
	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
	if (ret < 0)
		goto error;
	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);

	/* Step 2: share memory regions */
	ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
		goto error;

	/* Step 4: enable queues
	 * we enable the 1st queue pair by default.
	 */
	dev->ops->enable_qp(dev, 0, 1);

	dev->started = true;
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();
	/* TODO: free resource here or caller to check */
	return -1;
}
int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int error = 0;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i)
		dev->ops->enable_qp(dev, i, 0);

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		if (dev->ops->send_request(dev, VHOST_USER_GET_VRING_BASE,
					   &state) < 0) {
			PMD_DRV_LOG(ERR, "get_vring_base failed, index=%u\n",
				    i);
			error = -1;
			goto out;
		}
	}

	dev->started = false;
out:
	pthread_mutex_unlock(&dev->mutex);

	return error;
}
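/* In the vhost protocol, VHOST_USER_GET_VRING_BASE doubles as the "stop this
 * ring" request: besides returning the last avail index, it makes the
 * backend stop processing the ring, which is why it is sent for every
 * virtqueue when stopping the device.
 */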
static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr tmp;

	if (!mac)
		return;

	if (rte_ether_unformat_addr(mac, &tmp) == 0) {
		memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;
	} else {
		/* ignore the wrong mac, use random mac */
		PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
	}
}
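/* For example, a devargs string such as
 *     --vdev=virtio_user0,path=/tmp/vhost.sock,mac=00:11:22:33:44:55
 * ends up here; rte_ether_unformat_addr() accepts the usual colon-separated
 * notation. A malformed MAC is logged and ignored, and a random address is
 * used instead.
 */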
static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j;
	int callfd;
	int kickfd;

	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
		if (i >= dev->max_queue_pairs * 2) {
			dev->kickfds[i] = -1;
			dev->callfds[i] = -1;
			continue;
		}

		/* An invalid flag would do, but some backends use kickfd and
		 * callfd as criteria to judge whether the device is alive,
		 * so we use real eventfds in the end.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
			break;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			close(callfd);
			PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
			break;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	if (i < VIRTIO_MAX_VIRTQUEUES) {
		/* Clean up only the eventfds created so far */
		for (j = 0; j < i; ++j) {
			close(dev->callfds[j]);
			close(dev->kickfds[j]);
		}

		return -1;
	}

	return 0;
}
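/* The two eventfds per virtqueue follow the usual vhost conventions: the
 * backend writes to callfd to signal used descriptors (an interrupt towards
 * the driver), and the driver writes to kickfd to notify the backend of new
 * available descriptors.
 */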
static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];

	if (!eth_dev->intr_handle) {
		eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
		if (!eth_dev->intr_handle) {
			PMD_DRV_LOG(ERR, "fail to allocate intr_handle");
			return -1;
		}
		memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
	}

	for (i = 0; i < dev->max_queue_pairs; ++i)
		eth_dev->intr_handle->efds[i] = dev->callfds[i];
	eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
	eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
	eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
	/* For virtio vdev, no need to read the eventfd counter to clear the event */
	eth_dev->intr_handle->efd_counter_size = 0;
	eth_dev->intr_handle->fd = -1;
	if (dev->vhostfd >= 0)
		eth_dev->intr_handle->fd = dev->vhostfd;
	else if (dev->is_server)
		eth_dev->intr_handle->fd = dev->listenfd;

	return 0;
}
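/* Exposing the call eventfds through intr_handle->efds is what allows an
 * application to arm per-queue Rx interrupts on this vdev, while
 * intr_handle->fd (the vhost-user socket, or the listen socket in server
 * mode) is monitored for connection state changes.
 */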
static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (dev->started == false)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 0);

	/* Step 2: update memory regions */
	dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 1);

exit:
	pthread_mutex_unlock(&dev->mutex);
}
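/* Any hotplug of DPDK memory invalidates the region table previously shared
 * with the backend, so the regions are re-announced here with all queues
 * quiesced; otherwise the backend could keep using stale mappings while the
 * table is being replaced.
 */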
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	uint32_t q;

	dev->vhostfd = -1;
	dev->vhostfds = NULL;
	dev->tapfds = NULL;

	if (dev->is_server) {
		if (access(dev->path, F_OK) == 0 &&
		    !is_vhost_user_by_type(dev->path)) {
			PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!");
			return -1;
		}
		dev->ops = &virtio_ops_user;
	} else {
		if (is_vhost_user_by_type(dev->path)) {
			dev->ops = &virtio_ops_user;
		} else {
			dev->ops = &virtio_ops_kernel;

			dev->vhostfds = malloc(dev->max_queue_pairs *
					       sizeof(int));
			dev->tapfds = malloc(dev->max_queue_pairs *
					     sizeof(int));
			if (!dev->vhostfds || !dev->tapfds) {
				PMD_INIT_LOG(ERR, "Failed to malloc");
				return -1;
			}

			for (q = 0; q < dev->max_queue_pairs; ++q) {
				dev->vhostfds[q] = -1;
				dev->tapfds[q] = -1;
			}
		}
	}

	if (dev->ops->setup(dev) < 0)
		return -1;

	if (virtio_user_dev_init_notify(dev) < 0)
		return -1;

	if (virtio_user_fill_intr_handle(dev) < 0)
		return -1;

	return 0;
}
/* Use the macro below to filter features from the vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES			\
	(1ULL << VIRTIO_NET_F_MAC		|	\
	 1ULL << VIRTIO_NET_F_STATUS		|	\
	 1ULL << VIRTIO_NET_F_MQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR	|	\
	 1ULL << VIRTIO_NET_F_CTRL_VQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_RX		|	\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN		|	\
	 1ULL << VIRTIO_NET_F_CSUM		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO4		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO6		|	\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF		|	\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC	|	\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
	 1ULL << VIRTIO_F_IN_ORDER		|	\
	 1ULL << VIRTIO_F_VERSION_1		|	\
	 1ULL << VIRTIO_F_RING_PACKED		|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

#define VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES		\
	(1ULL << VHOST_USER_PROTOCOL_F_MQ |		\
	 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |	\
	 1ULL << VHOST_USER_PROTOCOL_F_STATUS)
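/* The effective feature set is computed in virtio_user_dev_init() as
 *     device_features = (backend_features | frontend_features)
 *                       & ~unsupported_features;
 * e.g. with cq=0, VIRTIO_NET_F_CTRL_VQ is added to unsupported_features and
 * is therefore stripped even if the backend offers it.
 */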
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq)
{
	uint64_t protocol_features = 0;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, PATH_MAX);
	dev->started = 0;
	dev->max_queue_pairs = queues;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
	dev->protocol_features = VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES;
	parse_mac(dev, mac);

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "backend set up fails");
		return -1;
	}

	if (!is_vhost_user_by_type(dev->path))
		dev->unsupported_features |=
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);

	if (!dev->is_server) {
		if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
					   NULL) < 0) {
			PMD_INIT_LOG(ERR, "set_owner fails: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
					   &dev->device_features) < 0) {
			PMD_INIT_LOG(ERR, "get_features failed: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->device_features &
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
			if (dev->ops->send_request(dev,
					VHOST_USER_GET_PROTOCOL_FEATURES,
					&protocol_features))
				return -1;

			dev->protocol_features &= protocol_features;

			if (dev->ops->send_request(dev,
					VHOST_USER_SET_PROTOCOL_FEATURES,
					&dev->protocol_features))
				return -1;

			if (!(dev->protocol_features &
					(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
				dev->unsupported_features |=
					(1ull << VIRTIO_NET_F_MQ);
		}
	} else {
		/* We just pretend vhost-user can support all these features.
		 * Note that this can be problematic if a feature is
		 * negotiated that the vhost-user backend connecting later
		 * does not actually support.
		 */
		dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
	}

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* The device does not really need to know anything about the
		 * CQ, so if necessary, we just claim to support it.
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature, we add it explicitly */
	if (is_vhost_user_by_type(dev->path))
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	/*
	 * Device features =
	 *     (frontend_features | backend_features) & ~unsupported_features;
	 */
	dev->device_features |= dev->frontend_features;
	dev->device_features &= ~dev->unsupported_features;

	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
					    virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR,
				     "Failed to register mem event callback");
			return -1;
		}
	}

	return 0;
}
void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	uint32_t i;

	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		close(dev->callfds[i]);
		close(dev->kickfds[i]);
	}

	if (dev->vhostfd >= 0)
		close(dev->vhostfd);

	if (dev->is_server && dev->listenfd >= 0) {
		close(dev->listenfd);
		dev->listenfd = -1;
	}

	if (dev->vhostfds) {
		for (i = 0; i < dev->max_queue_pairs; ++i) {
			close(dev->vhostfds[i]);
			if (dev->tapfds[i] >= 0)
				close(dev->tapfds[i]);
		}

		free(dev->vhostfds);
		free(dev->tapfds);
	}

	free(dev->ifname);

	if (dev->is_server)
		unlink(dev->path);
}
uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
			     q_pairs, dev->max_queue_pairs);
		return -1;
	}

	/* Server mode can't enable queue pairs if vhostfd is invalid,
	 * always return 0 in this case.
	 */
	if (!dev->is_server || dev->vhostfd >= 0) {
		for (i = 0; i < q_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 1);
		for (i = q_pairs; i < dev->max_queue_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 0);
	}
	dev->queue_pairs = q_pairs;

	return ret;
}
static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
	       wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}
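/* Packed-ring ownership rules (VIRTIO 1.1): a descriptor is available when
 * its AVAIL flag matches the driver's wrap counter and its USED flag does
 * not. The driver toggles its wrap counter on each ring wrap, so the same
 * flag pattern alternates between meaning "available" and "used" on
 * successive laps of the ring.
 */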
static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)
		vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}
void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {

		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
				vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;

		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}
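/* The release store above pairs with the acquire load in desc_is_avail():
 * the status byte written by virtio_user_handle_ctrl_msg_packed() must be
 * visible before the flags hand the descriptor back, or the driver could
 * observe a "used" descriptor with a stale ack.
 */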
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume avail ring, using used ring idx as first one */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}
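/* Since the control queue is serviced strictly in order, used->idx can
 * double as the consumption cursor into the avail ring: entries between
 * used->idx and avail->idx are exactly the requests not yet handled.
 */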
int
virtio_user_send_status_update(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;
	uint64_t arg = status;

	/* Vhost-user only for now */
	if (!is_vhost_user_by_type(dev->path))
		return 0;

	if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS)))
		return 0;

	ret = dev->ops->send_request(dev, VHOST_USER_SET_STATUS, &arg);
	if (ret) {
		PMD_INIT_LOG(ERR, "VHOST_USER_SET_STATUS failed (%d): %s", ret,
			     strerror(errno));
		return -1;
	}

	return 0;
}
int
virtio_user_update_status(struct virtio_user_dev *dev)
{
	uint64_t ret;
	int err;

	/* Vhost-user only for now */
	if (!is_vhost_user_by_type(dev->path))
		return 0;

	if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS)))
		return 0;

	err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS, &ret);
	if (err) {
		PMD_INIT_LOG(ERR, "VHOST_USER_GET_STATUS failed (%d): %s", err,
			     strerror(errno));
		return -1;
	}
	if (ret > UINT8_MAX) {
		PMD_INIT_LOG(ERR, "Invalid VHOST_USER_GET_STATUS response 0x%" PRIx64 "\n", ret);
		return -1;
	}

	dev->status = ret;
	PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
		     "\t-RESET: %u\n"
		     "\t-ACKNOWLEDGE: %u\n"
		     "\t-DRIVER: %u\n"
		     "\t-DRIVER_OK: %u\n"
		     "\t-FEATURES_OK: %u\n"
		     "\t-DEVICE_NEED_RESET: %u\n"
		     "\t-FAILED: %u\n",
		     dev->status,
		     (dev->status == VIRTIO_CONFIG_STATUS_RESET),
		     !!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
		     !!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
		     !!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
		     !!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
		     !!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
		     !!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));

	return 0;
}