+int
+is_vhost_user_by_type(const char *path)
+{
+ struct stat sb;
+
+ if (stat(path, &sb) == -1)
+ return 0;
+
+ return S_ISSOCK(sb.st_mode);
+}
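+
+/* Usage sketch for is_vhost_user_by_type() above (illustrative, paths
+ * are examples): a vhost-user backend is reached through a unix socket
+ * while the vhost-kernel backend is a character device, so the stat()
+ * type check tells them apart:
+ *
+ *   is_vhost_user_by_type("/path/to/vhost-user.sock");  -- non-zero
+ *   is_vhost_user_by_type("/dev/vhost-net");            -- zero
+ */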
+
+static int
+virtio_user_dev_init_notify(struct virtio_user_dev *dev)
+{
+ uint32_t i, j;
+ int callfd;
+ int kickfd;
+
+ for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
+ if (i >= dev->max_queue_pairs * 2) {
+ dev->kickfds[i] = -1;
+ dev->callfds[i] = -1;
+ continue;
+ }
+
+ /* An invalid fd could be passed here, but some backends use the
+ * kickfd and callfd as criteria to judge whether the device is
+ * alive, so allocate real eventfds.
+ */
+ callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (callfd < 0) {
+ PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
+ break;
+ }
+ kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (kickfd < 0) {
+ PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
+ close(callfd); /* do not leak the already-created callfd */
+ break;
+ }
+ dev->callfds[i] = callfd;
+ dev->kickfds[i] = kickfd;
+ }
+
+ if (i < VIRTIO_MAX_VIRTQUEUES) {
+ /* Entries at and above the failing index were never set. */
+ for (j = 0; j < i; ++j) {
+ close(dev->callfds[j]);
+ close(dev->kickfds[j]);
+ }
+
+ return -1;
+ }
+
+ return 0;
+}
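+
+/* Sketch of how the fds created above are consumed (illustrative, not
+ * part of this patch): kickfd carries frontend-to-backend queue
+ * notifications and callfd carries backend-to-frontend interrupts, both
+ * with eventfd(2) semantics, i.e. a notification is a counter write:
+ *
+ *   uint64_t buf = 1;
+ *   write(dev->kickfds[queue_idx], &buf, sizeof(buf));
+ *
+ * (queue_idx is a placeholder for a valid queue index.)
+ */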
+
+static int
+virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
+{
+ uint32_t i;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+
+ if (!eth_dev->intr_handle) {
+ eth_dev->intr_handle = calloc(1, sizeof(*eth_dev->intr_handle));
+ if (!eth_dev->intr_handle) {
+ PMD_DRV_LOG(ERR, "Failed to allocate intr_handle");
+ return -1;
+ }
+ }
+
+ for (i = 0; i < dev->max_queue_pairs; ++i)
+ eth_dev->intr_handle->efds[i] = dev->callfds[i];
+ eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
+ eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
+ eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
+ if (dev->vhostfd >= 0)
+ eth_dev->intr_handle->fd = dev->vhostfd;
+
+ return 0;
+}
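+
+/* Illustration (assumption about the consumer, not in this patch): by
+ * exporting the callfds as per-queue efds on intr_handle, an application
+ * can use Rx interrupt mode, roughly:
+ *
+ *   rte_eth_dev_rx_intr_enable(port_id, queue_id);
+ *   ret = rte_epoll_wait(RTE_EPOLL_PER_THREAD, events, n, timeout_ms);
+ *
+ * (port_id, queue_id, events, n and timeout_ms are placeholders.)
+ */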
+
+static int
+virtio_user_dev_setup(struct virtio_user_dev *dev)
+{
+ uint32_t q;
+
+ dev->vhostfd = -1;
+ dev->vhostfds = NULL;
+ dev->tapfds = NULL;
+
+ if (is_vhost_user_by_type(dev->path)) {
+ dev->ops = &ops_user;
+ } else {
+ dev->ops = &ops_kernel;
+
+ dev->vhostfds = malloc(dev->max_queue_pairs * sizeof(int));
+ dev->tapfds = malloc(dev->max_queue_pairs * sizeof(int));
+ if (!dev->vhostfds || !dev->tapfds) {
+ PMD_INIT_LOG(ERR, "Failed to allocate fd arrays");
+ free(dev->vhostfds);
+ free(dev->tapfds);
+ dev->vhostfds = NULL;
+ dev->tapfds = NULL;
+ return -1;
+ }
+
+ for (q = 0; q < dev->max_queue_pairs; ++q) {
+ dev->vhostfds[q] = -1;
+ dev->tapfds[q] = -1;
+ }
+ }
+
+ if (dev->ops->setup(dev) < 0)
+ return -1;
+
+ if (virtio_user_dev_init_notify(dev) < 0)
+ return -1;
+
+ if (virtio_user_fill_intr_handle(dev) < 0)
+ return -1;
+
+ return 0;
+}
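+
+/* Flow note for virtio_user_dev_setup() above (illustrative summary):
+ * dev->path selects the backend, so a unix socket path yields ops_user
+ * (vhost-user) while a char device such as /dev/vhost-net yields
+ * ops_kernel with one vhostfd/tapfd pair per queue pair; the eventfds
+ * must exist before virtio_user_fill_intr_handle() exports them.
+ */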
+
+/* Use the macro below to filter the features advertised by the vhost backend */
+#define VIRTIO_USER_SUPPORTED_FEATURES \
+ (1ULL << VIRTIO_NET_F_MAC | \
+ 1ULL << VIRTIO_NET_F_STATUS | \
+ 1ULL << VIRTIO_NET_F_MQ | \
+ 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR | \
+ 1ULL << VIRTIO_NET_F_CTRL_VQ | \
+ 1ULL << VIRTIO_NET_F_CTRL_RX | \
+ 1ULL << VIRTIO_NET_F_CTRL_VLAN | \
+ 1ULL << VIRTIO_NET_F_CSUM | \
+ 1ULL << VIRTIO_NET_F_HOST_TSO4 | \
+ 1ULL << VIRTIO_NET_F_HOST_TSO6 | \
+ 1ULL << VIRTIO_NET_F_MRG_RXBUF | \
+ 1ULL << VIRTIO_RING_F_INDIRECT_DESC | \
+ 1ULL << VIRTIO_NET_F_GUEST_CSUM | \
+ 1ULL << VIRTIO_NET_F_GUEST_TSO4 | \
+ 1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
+ 1ULL << VIRTIO_F_VERSION_1)
+
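+/* Usage sketch (assumption consistent with the comment above): the
+ * feature bits offered by the backend are masked down to this supported
+ * set during device init, along the lines of:
+ *
+ *   dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
+ */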