/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_eal_memconfig.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_CALL
	 * comes first, because vhost depends on this message to allocate the
	 * virtqueue pair.
	 */
	struct vhost_vring_file file;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);

	return 0;
}

static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
		.avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
		.used_user_addr = (uint64_t)(uintptr_t)vring->used,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	state.index = queue_sel;
	state.num = vring->num;
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);

	state.index = queue_sel;
	state.num = 0; /* no reservation */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		state.num |= (1 << 15);
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);

	dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);

	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
	 * comes last, because vhost depends on this message to judge if the
	 * frontend is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);

	return 0;
}
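
/* For reference, the complete per-vring conversation produced by the two
 * helpers above is (illustration, assuming queue_sel == 0):
 *
 *   VHOST_USER_SET_VRING_CALL  { index = 0, fd = callfds[0] }   <- first
 *   VHOST_USER_SET_VRING_NUM   { index = 0, num = vring->num }
 *   VHOST_USER_SET_VRING_BASE  { index = 0, num = 0 }
 *   VHOST_USER_SET_VRING_ADDR  { desc/avail/used user addresses }
 *   VHOST_USER_SET_VRING_KICK  { index = 0, fd = kickfds[0] }   <- last
 *
 * For packed rings, bit 15 of the SET_VRING_BASE num field carries the
 * initial avail wrap counter (set to 1 above), per the vhost-user spec.
 */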

static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, queue_sel;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup rx vq failed: %u", i);
			return -1;
		}
	}
	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup tx vq failed: %u", i);
			return -1;
		}
	}

	return 0;
}

int
is_vhost_user_by_type(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) == -1)
		return 0;

	return S_ISSOCK(sb.st_mode);
}
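
/* For instance, a path like "/tmp/vhost-user.sock" created by a vhost-user
 * backend is a UNIX socket, so S_ISSOCK() is true and the vhost-user ops are
 * selected; a character device such as /dev/vhost-net is not, and the
 * vhost-kernel ops are used instead (see virtio_user_dev_setup() below).
 */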

int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	uint64_t features;
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	pthread_mutex_lock(&dev->mutex);

	if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
		goto error;

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	/* Step 1: set features */
	features = dev->features;
	/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
	if (ret < 0)
		goto error;
	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);

	/* Step 2: share memory regions */
	ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
		goto error;

	/* Step 4: enable queues
	 * we enable the 1st queue pair by default.
	 */
	dev->ops->enable_qp(dev, 0, 1);

	dev->started = true;
	pthread_mutex_unlock(&dev->mutex);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	/* TODO: free resource here or caller to check */
	return -1;
}
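
/* Putting the steps together, the startup conversation with a vhost-user
 * backend looks like this (sketch, one queue pair):
 *
 *   per vring: SET_VRING_CALL              (step 0, create queues)
 *   SET_FEATURES                           (step 1)
 *   SET_MEM_TABLE                          (step 2, share memory regions)
 *   per vring: NUM / BASE / ADDR / KICK    (step 3, kick queues)
 *   SET_VRING_ENABLE { index, 1 }          (step 4, via enable_qp)
 */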

int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int error = 0;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i)
		dev->ops->enable_qp(dev, i, 0);

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		if (dev->ops->send_request(dev, VHOST_USER_GET_VRING_BASE,
					   &state) < 0) {
			PMD_DRV_LOG(ERR, "get_vring_base failed, index=%u", i);
			error = -1;
			goto out;
		}
	}

	dev->started = false;
out:
	pthread_mutex_unlock(&dev->mutex);

	return error;
}

static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
	int i, r;
	uint32_t tmp[ETHER_ADDR_LEN];

	if (!mac)
		return;

	r = sscanf(mac, "%x:%x:%x:%x:%x:%x", &tmp[0],
			&tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]);
	if (r == ETHER_ADDR_LEN) {
		for (i = 0; i < ETHER_ADDR_LEN; ++i)
			dev->mac_addr[i] = (uint8_t)tmp[i];
		dev->mac_specified = 1;
	} else {
		/* ignore the malformed mac, fall back to a random mac */
		PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
	}
}
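
/* Example for the parser above: mac = "00:11:22:33:44:55" fills
 * dev->mac_addr with the six parsed bytes and sets mac_specified; any
 * string that sscanf() cannot split into exactly ETHER_ADDR_LEN hex
 * fields is rejected and a random MAC is used instead.
 */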

static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j;
	int callfd;
	int kickfd;

	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
		if (i >= dev->max_queue_pairs * 2) {
			dev->kickfds[i] = -1;
			dev->callfds[i] = -1;
			continue;
		}

		/* May use an invalid flag, but some backends use kickfd and
		 * callfd as criteria to judge if the dev is alive, so finally
		 * we use a real eventfd.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
			break;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			close(callfd);
			PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
			break;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	if (i < VIRTIO_MAX_VIRTQUEUES) {
		/* close only the fds that were successfully created */
		for (j = 0; j < i; ++j) {
			close(dev->callfds[j]);
			close(dev->kickfds[j]);
		}

		return -1;
	}

	return 0;
}
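
/* The two eventfds created above per virtqueue form the doorbell/interrupt
 * pair: the frontend notifies the backend by writing to the kickfd, e.g.
 *
 *   uint64_t buf = 1;
 *   write(kickfd, &buf, sizeof(buf));
 *
 * while the backend signals used-ring updates by writing to the callfd,
 * which virtio_user_fill_intr_handle() below exposes for Rx interrupts.
 */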

static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];

	if (!eth_dev->intr_handle) {
		eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
		if (!eth_dev->intr_handle) {
			PMD_DRV_LOG(ERR, "fail to allocate intr_handle");
			return -1;
		}
		memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
	}

	for (i = 0; i < dev->max_queue_pairs; ++i)
		eth_dev->intr_handle->efds[i] = dev->callfds[i];
	eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
	eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
	eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
	/* For virtio vdev, no need to read the eventfd counter to clear
	 * the interrupt.
	 */
	eth_dev->intr_handle->efd_counter_size = 0;
	eth_dev->intr_handle->fd = -1;
	if (dev->vhostfd >= 0)
		eth_dev->intr_handle->fd = dev->vhostfd;
	else if (dev->is_server)
		eth_dev->intr_handle->fd = dev->listenfd;

	return 0;
}

static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (dev->started == false)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 0);

	/* Step 2: update memory regions */
	dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 1);

exit:
	pthread_mutex_unlock(&dev->mutex);
}
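
/* Pausing the queues around SET_MEM_TABLE in the callback above matters:
 * the backend translates ring and buffer addresses through the shared
 * memory map, so letting it keep processing descriptors while the regions
 * are being replaced could make it dereference stale mappings.
 */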

static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	uint32_t q;

	dev->vhostfd = -1;
	dev->vhostfds = NULL;
	dev->tapfds = NULL;

	if (dev->is_server) {
		if (access(dev->path, F_OK) == 0 &&
		    !is_vhost_user_by_type(dev->path)) {
			PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!");
			return -1;
		}
		dev->ops = &virtio_ops_user;
	} else {
		if (is_vhost_user_by_type(dev->path)) {
			dev->ops = &virtio_ops_user;
		} else {
			dev->ops = &virtio_ops_kernel;

			dev->vhostfds = malloc(dev->max_queue_pairs *
					       sizeof(int));
			dev->tapfds = malloc(dev->max_queue_pairs *
					     sizeof(int));
			if (!dev->vhostfds || !dev->tapfds) {
				PMD_INIT_LOG(ERR, "Failed to malloc");
				return -1;
			}

			for (q = 0; q < dev->max_queue_pairs; ++q) {
				dev->vhostfds[q] = -1;
				dev->tapfds[q] = -1;
			}
		}
	}

	if (dev->ops->setup(dev) < 0)
		return -1;

	if (virtio_user_dev_init_notify(dev) < 0)
		return -1;

	if (virtio_user_fill_intr_handle(dev) < 0)
		return -1;

	return 0;
}

/* Use the macro below to filter features from the vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES			\
	(1ULL << VIRTIO_NET_F_MAC		|	\
	 1ULL << VIRTIO_NET_F_STATUS		|	\
	 1ULL << VIRTIO_NET_F_MQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR	|	\
	 1ULL << VIRTIO_NET_F_CTRL_VQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_RX		|	\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN		|	\
	 1ULL << VIRTIO_NET_F_CSUM		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO4		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO6		|	\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF		|	\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC	|	\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
	 1ULL << VIRTIO_F_IN_ORDER		|	\
	 1ULL << VIRTIO_F_VERSION_1		|	\
	 1ULL << VIRTIO_F_RING_PACKED)

int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int mrg_rxbuf, int in_order, int packed_vq)
{
	pthread_mutex_init(&dev->mutex, NULL);
	snprintf(dev->path, PATH_MAX, "%s", path);
	dev->started = 0;
	dev->max_queue_pairs = queues;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
	parse_mac(dev, mac);

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "backend set up fails");
		return -1;
	}

	if (!dev->is_server) {
		if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
					   NULL) < 0) {
			PMD_INIT_LOG(ERR, "set_owner fails: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
					   &dev->device_features) < 0) {
			PMD_INIT_LOG(ERR, "get_features failed: %s",
				     strerror(errno));
			return -1;
		}
	} else {
		/* We just pretend vhost-user can support all these features.
		 * Note that this can be problematic if a feature is negotiated
		 * that the vhost-user backend connecting later does not
		 * actually support.
		 */
		dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
	}

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (packed_vq) {
		if (cq) {
			PMD_INIT_LOG(ERR, "control vq not supported yet with "
				     "packed virtqueues");
			return -1;
		}
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);
	}

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* The device does not really need to know anything about the
		 * control queue, so if necessary we just claim to support it.
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature, so we add it explicitly */
	if (is_vhost_user_by_type(dev->path))
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	/*
	 * Device features =
	 *     (frontend_features | backend_features) & ~unsupported_features;
	 */
	dev->device_features |= dev->frontend_features;
	dev->device_features &= ~dev->unsupported_features;
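
	/* Worked example (hypothetical backend): if the backend reports only
	 * MRG_RXBUF | CSUM, a MAC was specified and cq was requested, the two
	 * statements above yield
	 *
	 *   device_features = (MRG_RXBUF | CSUM)        <- backend
	 *                   | (MAC | CTRL_VQ | STATUS)  <- frontend_features
	 *
	 * minus everything in unsupported_features (e.g. VIRTIO_F_RING_PACKED
	 * when packed_vq == 0).
	 */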

	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
				virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR, "Failed to register mem event"
					" callback");
			return -1;
		}
	}

	return 0;
}

void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	uint32_t i;

	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		close(dev->callfds[i]);
		close(dev->kickfds[i]);
	}

	close(dev->vhostfd);

	if (dev->is_server && dev->listenfd >= 0) {
		close(dev->listenfd);
		dev->listenfd = -1;
	}

	if (dev->vhostfds) {
		for (i = 0; i < dev->max_queue_pairs; ++i)
			close(dev->vhostfds[i]);
		free(dev->vhostfds);
		free(dev->tapfds);
	}

	free(dev->ifname);

	if (dev->is_server)
		unlink(dev->path);
}

static uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
			     q_pairs, dev->max_queue_pairs);
		return -1;
	}

	/* Server mode can't enable queue pairs if vhostfd is invalid,
	 * so always return 0 in this case.
	 */
	if (!dev->is_server || dev->vhostfd >= 0) {
		for (i = 0; i < q_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 1);
		for (i = q_pairs; i < dev->max_queue_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 0);
	}
	dev->queue_pairs = q_pairs;

	return ret;
}

static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr =
		status;

	return n_descs;
}
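
/* A control request handled above is a chain of at least three descriptors,
 * e.g. for VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET:
 *
 *   idx_hdr    -> struct virtio_net_ctrl_hdr { class, cmd }
 *   idx_data   -> uint16_t queues (command-specific payload)
 *   idx_status -> virtio_net_ctrl_ack, written back by us
 *
 * n_descs counts every descriptor in the chain so the caller can fill the
 * used-ring length field.
 */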

void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume avail ring, using used ring idx as first one */
	while (vring->used->idx != vring->avail->idx) {
		avail_idx = (vring->used->idx) & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx; /* head of the consumed descriptor chain */
		uep->len = n_descs;

		vring->used->idx++;
	}
}