/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <linux/major.h>
#include <sys/sysmacros.h>
#include <sys/socket.h>

#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "virtio_ethdev.h"
#include "virtio_logs.h"
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"
#include "virtio_user/vhost.h"

#define virtio_user_get_dev(hwp) container_of(hwp, struct virtio_user_dev, hw)
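
/* Reset all Rx/Tx packed virtqueues of a port. This is needed on server-mode
 * reconnection: unlike with split rings, the packed ring state (descriptor
 * flags and wrap counters) cannot be resynchronized with a new backend
 * connection, so every ring is brought back to a clean initial state.
 */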
static void
virtio_user_reset_queues_packed(struct rte_eth_dev *eth_dev)
{
	struct virtio_user_dev *dev = eth_dev->data->dev_private;
	struct virtio_hw *hw = &dev->hw;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq;
	uint16_t i;

	/* Add lock to avoid queue contention. */
	rte_spinlock_lock(&hw->state_lock);
	hw->started = 0;

	/*
	 * Wait for the datapath to complete before resetting queues.
	 * 1 ms should be enough for the ongoing Tx/Rx functions to finish.
	 */
	rte_delay_ms(1);

	/* Vring reset for each Tx queue and Rx queue. */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxvq = eth_dev->data->rx_queues[i];
		virtqueue_rxvq_reset_packed(rxvq->vq);
		virtio_dev_rx_queue_setup_finish(eth_dev, i);
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		txvq = eth_dev->data->tx_queues[i];
		virtqueue_txvq_reset_packed(txvq->vq);
	}

	hw->started = 1;
	rte_spinlock_unlock(&hw->state_lock);
}
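
/* Accept a new connection from the vhost-user backend on the listen fd and
 * replay the whole virtio handshake: status ACK/DRIVER, feature and protocol
 * feature negotiation, FEATURES_OK and DRIVER_OK, then re-enable multi-queue
 * and move the LSC interrupt handler over to the new connection fd.
 */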
static int
virtio_user_server_reconnect(struct virtio_user_dev *dev)
{
	int ret, connectfd, old_status;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
	struct virtio_hw *hw = &dev->hw;
	uint64_t protocol_features;

	connectfd = accept(dev->listenfd, NULL, NULL);
	if (connectfd < 0)
		return -1;

	dev->vhostfd = connectfd;
	old_status = dev->status;

	vtpci_reset(hw);

	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);

	if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
				   &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "get_features failed: %s",
			     strerror(errno));
		return -1;
	}

	if (dev->device_features &
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
		if (dev->ops->send_request(dev,
					VHOST_USER_GET_PROTOCOL_FEATURES,
					&protocol_features))
			return -1;

		/* Offer VHOST_USER_PROTOCOL_F_STATUS */
		dev->protocol_features |=
			(1ULL << VHOST_USER_PROTOCOL_F_STATUS);
		dev->protocol_features &= protocol_features;

		if (dev->ops->send_request(dev,
					VHOST_USER_SET_PROTOCOL_FEATURES,
					&dev->protocol_features))
			return -1;

		if (!(dev->protocol_features &
				(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
			dev->unsupported_features |= (1ULL << VIRTIO_NET_F_MQ);
	}

	dev->device_features |= dev->frontend_features;

	/* Mask out the features not supported by the vhost-user backend. */
	dev->device_features &= ~(dev->unsupported_features);

	dev->features &= dev->device_features;

	/* For packed ring, resetting queues is required in reconnection. */
	if (vtpci_packed_queue(hw) &&
	    (old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
		PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
				" when the packed ring reconnects.");
		virtio_user_reset_queues_packed(eth_dev);
	}

	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);

	/* Start the device */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
	if (!dev->started)
		return -1;

	if (dev->queue_pairs > 1) {
		ret = virtio_user_handle_mq(dev, dev->queue_pairs);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Failed to enable multi-queue pairs!");
			return -1;
		}
	}
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		if (rte_intr_disable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt disable failed");
			return -1;
		}
		rte_intr_callback_unregister(eth_dev->intr_handle,
					     virtio_interrupt_handler,
					     eth_dev);
		eth_dev->intr_handle->fd = connectfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);

		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -1;
		}
	}
	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeded!");
	return 0;
}
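
/* Deferred link-down handling. This runs from an EAL alarm callback rather
 * than from the interrupt handler itself, because an interrupt callback
 * cannot unregister itself. In server mode the stale connection fd is closed
 * and the interrupt handler is rearmed on the listen fd to wait for a
 * reconnection.
 */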
static void
virtio_user_delayed_handler(void *param)
{
	struct virtio_hw *hw = (struct virtio_hw *)param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (rte_intr_disable(eth_dev->intr_handle) < 0) {
		PMD_DRV_LOG(ERR, "interrupt disable failed");
		return;
	}
	rte_intr_callback_unregister(eth_dev->intr_handle,
				     virtio_interrupt_handler, eth_dev);
	if (dev->is_server) {
		if (dev->vhostfd >= 0) {
			close(dev->vhostfd);
			dev->vhostfd = -1;
			/* Until the features are negotiated again, don't
			 * assume the backend supports
			 * VHOST_USER_PROTOCOL_F_STATUS.
			 */
			dev->protocol_features &=
				~(1ULL << VHOST_USER_PROTOCOL_F_STATUS);
		}
		eth_dev->intr_handle->fd = dev->listenfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);
		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return;
		}
	}
}
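
/* Emulate a read of the device config space. The MAC and max_virtqueue_pairs
 * fields are served from the driver's own state; the link status is probed
 * by peeking at the vhost fd with recv(MSG_PEEK): a zero-length read means
 * the backend hung up and the link is reported down.
 */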
static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
		     void *dst, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (offset == offsetof(struct virtio_net_config, mac) &&
	    length == RTE_ETHER_ADDR_LEN) {
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			((uint8_t *)dst)[i] = dev->mac_addr[i];
		return;
	}

	if (offset == offsetof(struct virtio_net_config, status)) {
		char buf[128];

		if (dev->vhostfd >= 0) {
			int r;
			int flags;

			flags = fcntl(dev->vhostfd, F_GETFL);
			if (fcntl(dev->vhostfd, F_SETFL,
					flags | O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
				return;
			}
			r = recv(dev->vhostfd, buf, 128, MSG_PEEK);
			if (r == 0 || (r < 0 && errno != EAGAIN)) {
				dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
				PMD_DRV_LOG(ERR, "virtio-user port %u is down",
					    hw->port_id);

				/* This function could be called in the process
				 * of interrupt handling; the callback cannot
				 * be unregistered here, so set an alarm to do
				 * it.
				 */
				rte_eal_alarm_set(1,
						  virtio_user_delayed_handler,
						  (void *)hw);
			} else {
				dev->net_status |= VIRTIO_NET_S_LINK_UP;
			}
			if (fcntl(dev->vhostfd, F_SETFL,
					flags & ~O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
				return;
			}
		} else if (dev->is_server) {
			dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
			if (virtio_user_server_reconnect(dev) >= 0)
				dev->net_status |= VIRTIO_NET_S_LINK_UP;
		}

		*(uint16_t *)dst = dev->net_status;
	}

	if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
		*(uint16_t *)dst = dev->max_queue_pairs;
}
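
/* Emulate a write to the device config space; only the MAC field is
 * writable, anything else is rejected with an error log.
 */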
static void
virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
		      const void *src, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if ((offset == offsetof(struct virtio_net_config, mac)) &&
	    (length == RTE_ETHER_ADDR_LEN))
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			dev->mac_addr[i] = ((const uint8_t *)src)[i];
	else
		PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
			    offset, length);
}

static void
virtio_user_reset(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_stop_device(dev);
}
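
/* Handle a status-register write from the virtio layer. Per the virtio spec
 * the driver initializes a device with ACK -> DRIVER -> FEATURES_OK ->
 * DRIVER_OK: features are committed to the backend on the first transition
 * to FEATURES_OK, the device is started on DRIVER_OK, and writing 0
 * (VIRTIO_CONFIG_STATUS_RESET) stops it again.
 */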
static void
virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
	uint8_t old_status = dev->status;

	if (status & VIRTIO_CONFIG_STATUS_FEATURES_OK &&
			~old_status & VIRTIO_CONFIG_STATUS_FEATURES_OK)
		virtio_user_dev_set_features(dev);
	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_start_device(dev);
	else if (status == VIRTIO_CONFIG_STATUS_RESET)
		virtio_user_reset(hw);

	virtio_user_dev_set_status(dev, status);
}

static uint8_t
virtio_user_get_status(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_dev_update_status(dev);

	return dev->status;
}

static uint64_t
virtio_user_get_features(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Unmask the feature bits defined in the vhost-user protocol. */
	return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
}

static void
virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	dev->features = features & dev->device_features;
}

static int
virtio_user_features_ok(struct virtio_hw *hw __rte_unused)
{
	return 0;
}

static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
{
	/* rxq interrupts and config interrupt are separated in virtio-user;
	 * here we only report config change.
	 */
	return VIRTIO_PCI_ISR_CONFIG;
}

static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
		    uint16_t vec __rte_unused)
{
	return 0;
}

static uint16_t
virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
			  struct virtqueue *vq __rte_unused,
			  uint16_t vec __rte_unused)
{
	/* pretend we have done that */
	return 0;
}

/* This function returns the queue size, i.e. the number of descriptors, of a
 * specified queue. It differs from VHOST_USER_GET_QUEUE_NUM, which returns
 * the maximum number of supported queues.
 */
static uint16_t
virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Currently, every queue has the same queue size. */
	return dev->queue_size;
}
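
/* Plug the virtqueue memory allocated by the common virtio code into the
 * vrings shared with the backend. For a packed virtqueue the single ring
 * allocation is laid out as:
 *
 *   desc area   : vq_nentries * sizeof(struct vring_packed_desc)
 *   driver area : struct vring_packed_desc_event (event suppression)
 *   device area : struct vring_packed_desc_event, starting at the next
 *                 VIRTIO_PCI_VRING_ALIGN boundary
 *
 * Both wrap counters start at 1, as required by the virtio 1.1 spec.
 */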
static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
			       struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	struct vring_packed *vring;
	uint64_t desc_addr;
	uint64_t avail_addr;
	uint64_t used_addr;
	uint16_t i;

	vring = &dev->packed_vrings[queue_idx];
	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries *
		sizeof(struct vring_packed_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr +
			   sizeof(struct vring_packed_desc_event),
			   VIRTIO_PCI_VRING_ALIGN);
	vring->num = vq->vq_nentries;
	vring->desc = (void *)(uintptr_t)desc_addr;
	vring->driver = (void *)(uintptr_t)avail_addr;
	vring->device = (void *)(uintptr_t)used_addr;
	dev->packed_queues[queue_idx].avail_wrap_counter = true;
	dev->packed_queues[queue_idx].used_wrap_counter = true;

	for (i = 0; i < vring->num; i++)
		vring->desc[i].flags = 0;
}
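
/* Same plumbing for a split virtqueue: the avail ring follows the descriptor
 * table directly, and the used ring starts at the next
 * VIRTIO_PCI_VRING_ALIGN boundary after avail->ring[vq_nentries], matching
 * the split ring layout in the virtio 1.0 spec.
 */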
static void
virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	uint64_t desc_addr, avail_addr, used_addr;

	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	dev->vrings[queue_idx].num = vq->vq_nentries;
	dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
	dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
	dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
}

static int
virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (vtpci_packed_queue(hw))
		virtio_user_setup_queue_packed(vq, dev);
	else
		virtio_user_setup_queue_split(vq, dev);

	return 0;
}

static void
virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	/* For legacy devices, writing 0 to the VIRTIO_PCI_QUEUE_PFN port makes
	 * QEMU stop the corresponding ioeventfds and reset the device status;
	 * for modern devices, zeroing the queue desc, avail and used addresses
	 * in the PCI bar triggers no further behavior in QEMU.
	 *
	 * Here we only care about what information to deliver to vhost-user
	 * or vhost-kernel, so we just close the ioeventfds for now.
	 */
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	close(dev->callfds[vq->vq_queue_index]);
	close(dev->kickfds[vq->vq_queue_index]);
}
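
/* Kick the backend when the frontend has new buffers. The control queue is
 * never forwarded: it is consumed in-process by virtio_user_handle_cq(),
 * which translates control commands (e.g. MQ setup) into vhost requests.
 * Data queues are kicked through their eventfd.
 */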
static void
virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t buf = 1;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (hw->cvq && (hw->cvq->vq == vq)) {
		if (vtpci_packed_queue(vq->hw))
			virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
		else
			virtio_user_handle_cq(dev, vq->vq_queue_index);
		return;
	}

	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
		PMD_DRV_LOG(ERR, "failed to kick backend: %s",
			    strerror(errno));
}

static void
virtio_user_dev_close(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_dev_uninit(dev);
}

const struct virtio_pci_ops virtio_user_ops = {
	.read_dev_cfg	= virtio_user_read_dev_config,
	.write_dev_cfg	= virtio_user_write_dev_config,
	.get_status	= virtio_user_get_status,
	.set_status	= virtio_user_set_status,
	.get_features	= virtio_user_get_features,
	.set_features	= virtio_user_set_features,
	.features_ok	= virtio_user_features_ok,
	.get_isr	= virtio_user_get_isr,
	.set_config_irq	= virtio_user_set_config_irq,
	.set_queue_irq	= virtio_user_set_queue_irq,
	.get_queue_num	= virtio_user_get_queue_num,
	.setup_queue	= virtio_user_setup_queue,
	.del_queue	= virtio_user_del_queue,
	.notify_queue	= virtio_user_notify_queue,
	.dev_close	= virtio_user_dev_close,
};

static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM     "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM         "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC            "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH           "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE     "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
	VIRTIO_USER_ARG_INTERFACE_NAME,
#define VIRTIO_USER_ARG_SERVER_MODE    "server"
	VIRTIO_USER_ARG_SERVER_MODE,
#define VIRTIO_USER_ARG_MRG_RXBUF      "mrg_rxbuf"
	VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER       "in_order"
	VIRTIO_USER_ARG_IN_ORDER,
#define VIRTIO_USER_ARG_PACKED_VQ      "packed_vq"
	VIRTIO_USER_ARG_PACKED_VQ,
#define VIRTIO_USER_ARG_SPEED          "speed"
	VIRTIO_USER_ARG_SPEED,
#define VIRTIO_USER_ARG_VECTORIZED     "vectorized"
	VIRTIO_USER_ARG_VECTORIZED,
	NULL
};

#define VIRTIO_USER_DEF_CQ_EN		0
#define VIRTIO_USER_DEF_Q_NUM		1
#define VIRTIO_USER_DEF_Q_SZ		256
#define VIRTIO_USER_DEF_SERVER_MODE	0

static int
get_string_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}

static int
get_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	uint64_t integer = 0;

	if (!value || !extra_args)
		return -EINVAL;
	errno = 0;
	integer = strtoull(value, NULL, 0);
	/* extra_args keeps the default value; it should be replaced
	 * only in case of successful parsing of the 'value' arg.
	 */
	if (errno == 0)
		*(uint64_t *)extra_args = integer;

	return -errno;
}
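
/* vhost-vdpa character devices use a dynamically allocated major number.
 * Find it by scanning /proc/devices, whose "Character devices:" section
 * contains lines such as (illustrative value, it varies per system):
 *
 *   239 vhost-vdpa
 *
 * If no such line exists, fall back to UNNAMED_MAJOR.
 */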
static uint32_t
vdpa_dynamic_major_num(void)
{
	FILE *fp;
	char *line = NULL;
	size_t size = 0;
	char name[11];
	bool found = false;
	uint32_t num;

	fp = fopen("/proc/devices", "r");
	if (fp == NULL) {
		PMD_INIT_LOG(ERR, "Cannot open /proc/devices: %s",
			     strerror(errno));
		return UNNAMED_MAJOR;
	}

	while (getline(&line, &size, fp) > 0) {
		char *stripped = line + strspn(line, " ");

		if ((sscanf(stripped, "%u %10s", &num, name) == 2) &&
		    (strncmp(name, "vhost-vdpa", 10) == 0)) {
			found = true;
			break;
		}
	}
	free(line);
	fclose(fp);
	return found ? num : UNNAMED_MAJOR;
}
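
/* Guess the backend type from the given path: a UNIX socket (or a path that
 * does not exist yet, in server mode) means vhost-user; a character device
 * with the misc major is a /dev/vhost-net style vhost-kernel device; a
 * character device with the vhost-vdpa major is a vDPA device.
 */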
static enum virtio_user_backend_type
virtio_user_backend_type(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) == -1) {
		if (errno == ENOENT)
			return VIRTIO_USER_BACKEND_VHOST_USER;

		PMD_INIT_LOG(ERR, "stat() failed: %s (%s)", path,
			     strerror(errno));
		return VIRTIO_USER_BACKEND_UNKNOWN;
	}

	if (S_ISSOCK(sb.st_mode)) {
		return VIRTIO_USER_BACKEND_VHOST_USER;
	} else if (S_ISCHR(sb.st_mode)) {
		if (major(sb.st_rdev) == MISC_MAJOR)
			return VIRTIO_USER_BACKEND_VHOST_KERNEL;
		if (major(sb.st_rdev) == vdpa_dynamic_major_num())
			return VIRTIO_USER_BACKEND_VHOST_VDPA;
	}
	return VIRTIO_USER_BACKEND_UNKNOWN;
}

static struct rte_eth_dev *
virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_data *data;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*dev));
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
		return NULL;
	}

	data = eth_dev->data;
	dev = eth_dev->data->dev_private;
	hw = &dev->hw;

	hw->port_id = data->port_id;
	dev->port_id = data->port_id;
	virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
	/*
	 * MSI-X is required to enable LSC (see virtio_init_device).
	 * Here just pretend that we support MSI-X.
	 */
	hw->use_msix = 1;
	hw->bus_type = VIRTIO_BUS_USER;
	hw->use_inorder_rx = 0;
	hw->use_inorder_tx = 0;

	return eth_dev;
}

static void
virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
{
	rte_eth_dev_release_port(eth_dev);
}
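
/* Typical usage, as an illustrative example (socket path and argument
 * values are placeholders, not defaults):
 *
 *   dpdk-testpmd --vdev=net_virtio_user0,path=/tmp/vhost-user.sock,\
 *       queues=2,queue_size=1024,server=1 -- -i
 *
 * "path" is mandatory; the backend type (vhost-user, vhost-kernel or
 * vhost-vdpa) is inferred from the path by virtio_user_backend_type().
 */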

/* Dev initialization routine. Invoked once for each virtio vdev at
 * EAL init time, see rte_bus_probe().
 * Returns 0 on success.
 */
static int
virtio_user_pmd_probe(struct rte_vdev_device *vdev)
{
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;
	enum virtio_user_backend_type backend_type = VIRTIO_USER_BACKEND_UNKNOWN;
	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
	uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
	uint64_t mrg_rxbuf = 1;
	uint64_t in_order = 1;
	uint64_t packed_vq = 0;
	uint64_t vectorized = 0;
	char *path = NULL;
	char *ifname = NULL;
	char *mac_addr = NULL;
	int ret = -1;

	RTE_BUILD_BUG_ON(offsetof(struct virtio_user_dev, hw) != 0);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const char *name = rte_vdev_device_name(vdev);

		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_INIT_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		if (eth_virtio_dev_init(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
			rte_eth_dev_release_port(eth_dev);
			return -1;
		}

		eth_dev->dev_ops = &virtio_user_secondary_eth_dev_ops;
		eth_dev->device = &vdev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_args);
	if (!kvlist) {
		PMD_INIT_LOG(ERR, "error when parsing params");
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
				       &get_string_arg, &path) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_PATH);
			goto end;
		}
	} else {
		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
			     VIRTIO_USER_ARG_PATH);
		goto end;
	}

	backend_type = virtio_user_backend_type(path);
	if (backend_type == VIRTIO_USER_BACKEND_UNKNOWN) {
		PMD_INIT_LOG(ERR,
			     "unable to determine backend type for path %s",
			     path);
		goto end;
	}
	PMD_INIT_LOG(INFO, "Backend type detected: %s",
		     virtio_user_backend_strings[backend_type]);

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
		if (backend_type != VIRTIO_USER_BACKEND_VHOST_KERNEL) {
			PMD_INIT_LOG(ERR,
				     "arg %s applies only to vhost-kernel backend",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}

		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
				       &get_string_arg, &ifname) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
				       &get_string_arg, &mac_addr) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_MAC);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
				       &get_integer_arg, &queue_size) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_QUEUE_SIZE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
				       &get_integer_arg, &queues) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_QUEUES_NUM);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
				       &get_integer_arg, &server_mode) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_SERVER_MODE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
				       &get_integer_arg, &cq) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_CQ_NUM);
			goto end;
		}
	} else if (queues > 1) {
		cq = 1;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ,
				       &get_integer_arg, &packed_vq) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_PACKED_VQ);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_VECTORIZED) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_VECTORIZED,
				       &get_integer_arg, &vectorized) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_VECTORIZED);
			goto end;
		}
	}

	if (queues > 1 && cq == 0) {
		PMD_INIT_LOG(ERR, "multi-queue requires the control queue (cq=1)");
		goto end;
	}

	if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
		PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
			VIRTIO_USER_ARG_QUEUES_NUM, queues,
			VIRTIO_MAX_VIRTQUEUE_PAIRS);
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
				       &get_integer_arg, &mrg_rxbuf) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_MRG_RXBUF);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
				       &get_integer_arg, &in_order) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_IN_ORDER);
			goto end;
		}
	}

	eth_dev = virtio_user_eth_dev_alloc(vdev);
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
		goto end;
	}

	dev = eth_dev->data->dev_private;
	hw = &dev->hw;
	if (virtio_user_dev_init(dev, path, queues, cq,
			 queue_size, mac_addr, &ifname, server_mode,
			 mrg_rxbuf, in_order, packed_vq, backend_type) < 0) {
		PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	/* previously called by pci probing for physical dev */
	if (eth_virtio_dev_init(eth_dev) < 0) {
		PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	if (vectorized) {
		if (packed_vq) {
#if defined(CC_AVX512_SUPPORT) || defined(RTE_ARCH_ARM)
			hw->use_vec_rx = 1;
			hw->use_vec_tx = 1;
#else
			PMD_INIT_LOG(INFO,
				"build environment does not support packed ring vectorized");
#endif
		} else {
			hw->use_vec_rx = 1;
		}
	}

	rte_eth_dev_probing_finish(eth_dev);
	ret = 0;

end:
	if (kvlist)
		rte_kvargs_free(kvlist);
	free(path);
	free(mac_addr);
	free(ifname);
	return ret;
}

static int
virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	const char *name;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	/* make sure the device is stopped, queues freed */
	return rte_eth_dev_close(eth_dev->data->port_id);
}
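
/* DMA map/unmap requests are simply forwarded to the backend when it
 * provides the hooks; in practice only the vhost-vdpa backend does, since
 * it needs to keep the device IOMMU mappings in sync with the process
 * memory mappings.
 */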
static int virtio_user_pmd_dma_map(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	dev = eth_dev->data->dev_private;

	if (dev->ops->dma_map)
		return dev->ops->dma_map(dev, addr, iova, len);

	return 0;
}

static int virtio_user_pmd_dma_unmap(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	dev = eth_dev->data->dev_private;

	if (dev->ops->dma_unmap)
		return dev->ops->dma_unmap(dev, addr, iova, len);

	return 0;
}

static struct rte_vdev_driver virtio_user_driver = {
	.probe = virtio_user_pmd_probe,
	.remove = virtio_user_pmd_remove,
	.dma_map = virtio_user_pmd_dma_map,
	.dma_unmap = virtio_user_pmd_dma_unmap,
	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
};

RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
	"path=<path> "
	"mac=<mac addr> "
	"cq=<int> "
	"queue_size=<int> "
	"queues=<int> "
	"iface=<string> "
	"server=<0|1> "
	"mrg_rxbuf=<0|1> "
	"in_order=<0|1> "
	"packed_vq=<0|1> "
	"speed=<int> "
	"vectorized=<0|1>");