/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/socket.h>

#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "virtio_ethdev.h"
#include "virtio_logs.h"
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"
#include "virtio_user/vhost.h"
#define virtio_user_get_dev(hw) \
	((struct virtio_user_dev *)(hw)->virtio_user_dev)
static void
virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq;
	uint16_t i;

	/* Add lock to avoid queue contention. */
	rte_spinlock_lock(&hw->state_lock);
	hw->started = 0;

	/*
	 * Wait for the datapath to quiesce before resetting the queues;
	 * 1 ms should be enough for any ongoing Tx/Rx function to finish.
	 */
	rte_delay_ms(1);

	/* Vring reset for each Tx queue and Rx queue. */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		virtqueue_rxvq_reset_packed(rxvq->vq);
		virtio_dev_rx_queue_setup_finish(dev, i);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txvq = dev->data->tx_queues[i];
		virtqueue_txvq_reset_packed(txvq->vq);
	}

	hw->started = 1;
	rte_spinlock_unlock(&hw->state_lock);
}
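/* Server-mode reconnect path: accept the new vhost-user connection,
 * renegotiate the (protocol) features, restart the device, and point
 * the LSC interrupt source at the new connection fd.
 */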
static int
virtio_user_server_reconnect(struct virtio_user_dev *dev)
{
	int ret;
	int connectfd;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
	struct virtio_hw *hw = eth_dev->data->dev_private;
	uint64_t protocol_features;

	connectfd = accept(dev->listenfd, NULL, NULL);
	if (connectfd < 0)
		return -1;

	dev->vhostfd = connectfd;
	if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
				   &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "get_features failed: %s",
			     strerror(errno));
		return -1;
	}

	if (dev->device_features &
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
		if (dev->ops->send_request(dev,
					VHOST_USER_GET_PROTOCOL_FEATURES,
					&protocol_features))
			return -1;

		dev->protocol_features &= protocol_features;

		if (dev->ops->send_request(dev,
					VHOST_USER_SET_PROTOCOL_FEATURES,
					&dev->protocol_features))
			return -1;

		if (!(dev->protocol_features &
				(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
			dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
	}

	dev->device_features |= dev->frontend_features;

	/* Mask out features the vhost-user backend does not support. */
	dev->device_features &= ~(dev->unsupported_features);

	dev->features &= dev->device_features;

	/* For packed ring, resetting queues is required in reconnection. */
	if (vtpci_packed_queue(hw) &&
	    (vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
		PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
				" when reconnecting with packed ring.");
		virtio_user_reset_queues_packed(eth_dev);
	}

	ret = virtio_user_start_device(dev);
	if (ret < 0)
		return -1;

	if (dev->queue_pairs > 1) {
		ret = virtio_user_handle_mq(dev, dev->queue_pairs);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Failed to enable multi-queue pairs!");
			return -1;
		}
	}
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		if (rte_intr_disable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt disable failed");
			return -1;
		}
		rte_intr_callback_unregister(eth_dev->intr_handle,
					     virtio_interrupt_handler,
					     eth_dev);
		eth_dev->intr_handle->fd = connectfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);

		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -1;
		}
	}
	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeded!");
	return 0;
}
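/* Deferred from the interrupt handler via rte_eal_alarm_set(): an
 * interrupt callback cannot unregister itself, so the teardown of a
 * dead vhost connection is done here. In server mode, the interrupt
 * source is switched back to the listen fd to catch the next reconnect.
 */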
static void
virtio_user_delayed_handler(void *param)
{
	struct virtio_hw *hw = (struct virtio_hw *)param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (rte_intr_disable(eth_dev->intr_handle) < 0) {
		PMD_DRV_LOG(ERR, "interrupt disable failed");
		return;
	}
	rte_intr_callback_unregister(eth_dev->intr_handle,
				     virtio_interrupt_handler, eth_dev);
	if (dev->is_server) {
		if (dev->vhostfd >= 0) {
			close(dev->vhostfd);
			dev->vhostfd = -1;
		}
		eth_dev->intr_handle->fd = dev->listenfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);
		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return;
		}
	}
}
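/* Emulate reads of the virtio-net config space: the MAC address and
 * max_virtqueue_pairs come from the backend state, while the status
 * field is derived by peeking at the vhost fd (a closed peer means
 * link down).
 */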
static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
		     void *dst, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (offset == offsetof(struct virtio_net_config, mac) &&
	    length == RTE_ETHER_ADDR_LEN) {
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			((uint8_t *)dst)[i] = dev->mac_addr[i];
		return;
	}

	if (offset == offsetof(struct virtio_net_config, status)) {
		char buf[128];

		if (dev->vhostfd >= 0) {
			int r;
			int flags;

			flags = fcntl(dev->vhostfd, F_GETFL);
			if (fcntl(dev->vhostfd, F_SETFL,
					flags | O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
				return;
			}
			r = recv(dev->vhostfd, buf, 128, MSG_PEEK);
			if (r == 0 || (r < 0 && errno != EAGAIN)) {
				dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
				PMD_DRV_LOG(ERR, "virtio-user port %u is down",
					    hw->port_id);

				/* This function could be called in the process
				 * of interrupt handling; the callback cannot
				 * be unregistered here, so set an alarm to
				 * do it.
				 */
				rte_eal_alarm_set(1,
						  virtio_user_delayed_handler,
						  (void *)hw);
			} else {
				dev->net_status |= VIRTIO_NET_S_LINK_UP;
			}
			if (fcntl(dev->vhostfd, F_SETFL,
					flags & ~O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
				return;
			}
		} else if (dev->is_server) {
			dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
			if (virtio_user_server_reconnect(dev) >= 0)
				dev->net_status |= VIRTIO_NET_S_LINK_UP;
		}

		*(uint16_t *)dst = dev->net_status;
	}

	if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
		*(uint16_t *)dst = dev->max_queue_pairs;
}
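/* Emulate writes to the virtio-net config space; only the MAC address
 * field is writable.
 */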
static void
virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
		      const void *src, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if ((offset == offsetof(struct virtio_net_config, mac)) &&
	    (length == RTE_ETHER_ADDR_LEN))
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			dev->mac_addr[i] = ((const uint8_t *)src)[i];
	else
		PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
			    offset, length);
}
static void
virtio_user_reset(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_stop_device(dev);
}
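/* Mirror the status writes a driver would do to a PCI device: DRIVER_OK
 * starts the backend datapath, a reset (zero) write stops it.
 */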
static void
virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_start_device(dev);
	else if (status == VIRTIO_CONFIG_STATUS_RESET)
		virtio_user_reset(hw);
	dev->status = status;
	virtio_user_send_status_update(dev, status);
}
static uint8_t
virtio_user_get_status(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_update_status(dev);

	return dev->status;
}
static uint64_t
virtio_user_get_features(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Mask out feature bits the virtio PMD does not support as a guest. */
	return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
}
static void
virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	dev->features = features & dev->device_features;
}
static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
{
	/* rxq interrupts and config interrupt are separated in virtio-user,
	 * so we only ever report a config change here.
	 */
	return VIRTIO_PCI_ISR_CONFIG;
}
static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
			   uint16_t vec __rte_unused)
{
	return 0;
}
static uint16_t
virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
			  struct virtqueue *vq __rte_unused,
			  uint16_t vec)
{
	/* Pretend we have done that. */
	return vec;
}
/* Get the queue size, i.e. the number of descriptors, of the specified
 * queue. Not to be confused with VHOST_USER_GET_QUEUE_NUM, which returns
 * the maximum number of supported queues.
 */
static uint16_t
virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Currently, every queue has the same size. */
	return dev->queue_size;
}
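/* Lay a packed virtqueue out over the contiguous ring memory allocated
 * by the common virtio code: the descriptor array first, then the
 * driver and device event-suppression structures, the latter aligned
 * to VIRTIO_PCI_VRING_ALIGN.
 */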
static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
			       struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	struct vring_packed *vring;
	uint64_t desc_addr;
	uint64_t avail_addr;
	uint64_t used_addr;
	uint16_t i;

	vring = &dev->packed_vrings[queue_idx];
	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries *
		sizeof(struct vring_packed_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr +
			   sizeof(struct vring_packed_desc_event),
			   VIRTIO_PCI_VRING_ALIGN);
	vring->num = vq->vq_nentries;
	vring->desc = (void *)(uintptr_t)desc_addr;
	vring->driver = (void *)(uintptr_t)avail_addr;
	vring->device = (void *)(uintptr_t)used_addr;
	dev->packed_queues[queue_idx].avail_wrap_counter = true;
	dev->packed_queues[queue_idx].used_wrap_counter = true;

	for (i = 0; i < vring->num; i++)
		vring->desc[i].flags = 0;
}
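/* Same exercise for a split virtqueue: descriptor table, then the
 * avail ring, then the used ring aligned to VIRTIO_PCI_VRING_ALIGN,
 * as mandated by the legacy vring layout.
 */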
static void
virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	uint64_t desc_addr, avail_addr, used_addr;

	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	dev->vrings[queue_idx].num = vq->vq_nentries;
	dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
	dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
	dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
}
static int
virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (vtpci_packed_queue(hw))
		virtio_user_setup_queue_packed(vq, dev);
	else
		virtio_user_setup_queue_split(vq, dev);

	return 0;
}
static void
virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	/* For legacy devices, writing 0 to the VIRTIO_PCI_QUEUE_PFN port
	 * makes QEMU stop the corresponding ioeventfds and reset the device
	 * status.
	 * For modern devices, the queue desc, avail and used addresses in
	 * the PCI BAR are set to 0, with no further behavior observed in
	 * QEMU.
	 *
	 * Here we only care about what information to deliver to vhost-user
	 * or vhost-kernel, so we just close the ioeventfds for now.
	 */
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	close(dev->callfds[vq->vq_queue_index]);
	close(dev->kickfds[vq->vq_queue_index]);
}
static void
virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t buf = 1;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (hw->cvq && (hw->cvq->vq == vq)) {
		if (vtpci_packed_queue(vq->hw))
			virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
		else
			virtio_user_handle_cq(dev, vq->vq_queue_index);
		return;
	}

	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
		PMD_DRV_LOG(ERR, "failed to kick backend: %s",
			    strerror(errno));
}
const struct virtio_pci_ops virtio_user_ops = {
	.read_dev_cfg = virtio_user_read_dev_config,
	.write_dev_cfg = virtio_user_write_dev_config,
	.get_status = virtio_user_get_status,
	.set_status = virtio_user_set_status,
	.get_features = virtio_user_get_features,
	.set_features = virtio_user_set_features,
	.get_isr = virtio_user_get_isr,
	.set_config_irq = virtio_user_set_config_irq,
	.set_queue_irq = virtio_user_set_queue_irq,
	.get_queue_num = virtio_user_get_queue_num,
	.setup_queue = virtio_user_setup_queue,
	.del_queue = virtio_user_del_queue,
	.notify_queue = virtio_user_notify_queue,
};
static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
	VIRTIO_USER_ARG_INTERFACE_NAME,
#define VIRTIO_USER_ARG_SERVER_MODE "server"
	VIRTIO_USER_ARG_SERVER_MODE,
#define VIRTIO_USER_ARG_MRG_RXBUF "mrg_rxbuf"
	VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER "in_order"
	VIRTIO_USER_ARG_IN_ORDER,
#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq"
	VIRTIO_USER_ARG_PACKED_VQ,
#define VIRTIO_USER_ARG_SPEED "speed"
	VIRTIO_USER_ARG_SPEED,
#define VIRTIO_USER_ARG_VECTORIZED "vectorized"
	VIRTIO_USER_ARG_VECTORIZED,
	NULL
};
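/* Illustrative devargs (example values only): a virtio-user port attached
 * to a vhost-user socket could be created from the EAL command line with
 * e.g.
 *
 *   --vdev=net_virtio_user0,path=/tmp/vhost.sock,queues=2,queue_size=1024,server=1
 *
 * Only "path" is mandatory; the remaining arguments fall back to the
 * defaults below or to the defaults chosen in probe().
 */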
#define VIRTIO_USER_DEF_CQ_EN	0
#define VIRTIO_USER_DEF_Q_NUM	1
#define VIRTIO_USER_DEF_Q_SZ	256
#define VIRTIO_USER_DEF_SERVER_MODE	0
static int
get_string_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}
static int
get_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	uint64_t integer = 0;

	if (!value || !extra_args)
		return -EINVAL;
	errno = 0;
	integer = strtoull(value, NULL, 0);
	/* extra_args keeps its default value; replace it only if the
	 * 'value' arg was parsed successfully.
	 */
	if (errno == 0)
		*(uint64_t *)extra_args = integer;

	return -errno;
}
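/* Infer the backend from the path: a UNIX domain socket is served by a
 * vhost-user process; anything else (e.g. /dev/vhost-net) is assumed
 * to be a vhost-kernel character device.
 */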
static enum virtio_user_backend_type
virtio_user_backend_type(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) == -1)
		return VIRTIO_USER_BACKEND_UNKNOWN;

	return S_ISSOCK(sb.st_mode) ?
			VIRTIO_USER_BACKEND_VHOST_USER :
			VIRTIO_USER_BACKEND_VHOST_KERNEL;
}
static struct rte_eth_dev *
virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_data *data;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*hw));
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
		return NULL;
	}

	data = eth_dev->data;
	hw = eth_dev->data->dev_private;

	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
	if (!dev) {
		PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed");
		rte_eth_dev_release_port(eth_dev);
		return NULL;
	}

	hw->port_id = data->port_id;
	dev->port_id = data->port_id;
	virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
	/*
	 * MSIX is required to enable LSC (see virtio_init_device).
	 * Here we just pretend that we support MSI-X.
	 */
	hw->use_msix = 1;
	hw->modern = 0;
	hw->use_vec_rx = 0;
	hw->use_vec_tx = 0;
	hw->use_inorder_rx = 0;
	hw->use_inorder_tx = 0;
	hw->virtio_user_dev = dev;
	return eth_dev;
}
static void
virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_dev_data *data = eth_dev->data;
	struct virtio_hw *hw = data->dev_private;

	rte_free(hw->virtio_user_dev);
	rte_eth_dev_release_port(eth_dev);
}
/* Dev initialization routine. Invoked once for each virtio vdev at
 * EAL init time, see rte_bus_probe().
 * Returns 0 on success.
 */
static int
virtio_user_pmd_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	enum virtio_user_backend_type backend_type = VIRTIO_USER_BACKEND_UNKNOWN;
	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
	uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
	uint64_t mrg_rxbuf = 1;
	uint64_t in_order = 1;
	uint64_t packed_vq = 0;
	uint64_t vectorized = 0;
	char *path = NULL;
	char *ifname = NULL;
	char *mac_addr = NULL;
	int ret = -1;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const char *name = rte_vdev_device_name(dev);

		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_INIT_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		if (eth_virtio_dev_init(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "eth_virtio_dev_init failed");
			rte_eth_dev_release_port(eth_dev);
			return -1;
		}

		eth_dev->dev_ops = &virtio_user_secondary_eth_dev_ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}
	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args);
	if (!kvlist) {
		PMD_INIT_LOG(ERR, "error when parsing param");
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
				       &get_string_arg, &path) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_PATH);
			goto end;
		}
	} else {
		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
			     VIRTIO_USER_ARG_PATH);
		goto end;
	}

	backend_type = virtio_user_backend_type(path);
	if (backend_type == VIRTIO_USER_BACKEND_UNKNOWN) {
		PMD_INIT_LOG(ERR,
			     "unable to determine backend type for path %s",
			     path);
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
		if (backend_type != VIRTIO_USER_BACKEND_VHOST_KERNEL) {
			PMD_INIT_LOG(ERR,
				"arg %s applies only to vhost-kernel backend",
				VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}

		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
				       &get_string_arg, &ifname) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
				       &get_string_arg, &mac_addr) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_MAC);
			goto end;
		}
	}
	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
				       &get_integer_arg, &queue_size) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_QUEUE_SIZE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
				       &get_integer_arg, &queues) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_QUEUES_NUM);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
				       &get_integer_arg, &server_mode) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_SERVER_MODE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
				       &get_integer_arg, &cq) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_CQ_NUM);
			goto end;
		}
	} else if (queues > 1) {
		cq = 1;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ,
				       &get_integer_arg, &packed_vq) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_PACKED_VQ);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_VECTORIZED) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_VECTORIZED,
				       &get_integer_arg, &vectorized) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_VECTORIZED);
			goto end;
		}
	}

	if (queues > 1 && cq == 0) {
		PMD_INIT_LOG(ERR, "multi-queue requires a control queue");
		goto end;
	}

	if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
		PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
			VIRTIO_USER_ARG_QUEUES_NUM, queues,
			VIRTIO_MAX_VIRTQUEUE_PAIRS);
		goto end;
	}
	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
				       &get_integer_arg, &mrg_rxbuf) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_MRG_RXBUF);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
				       &get_integer_arg, &in_order) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_IN_ORDER);
			goto end;
		}
	}

	eth_dev = virtio_user_eth_dev_alloc(dev);
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "virtio_user failed to alloc device");
		goto end;
	}

	hw = eth_dev->data->dev_private;
	if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
			 queue_size, mac_addr, &ifname, server_mode,
			 mrg_rxbuf, in_order, packed_vq, backend_type) < 0) {
		PMD_INIT_LOG(ERR, "virtio_user_dev_init failed");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	/* previously called by pci probing for physical dev */
	if (eth_virtio_dev_init(eth_dev) < 0) {
		PMD_INIT_LOG(ERR, "eth_virtio_dev_init failed");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	if (vectorized) {
		if (packed_vq) {
#if defined(CC_AVX512_SUPPORT)
			hw->use_vec_rx = 1;
			hw->use_vec_tx = 1;
#else
			PMD_INIT_LOG(INFO,
				"build environment does not support packed ring vectorized");
#endif
		} else {
			hw->use_vec_rx = 1;
		}
	}

	rte_eth_dev_probing_finish(eth_dev);
	ret = 0;

end:
	if (kvlist)
		rte_kvargs_free(kvlist);
	if (path)
		free(path);
	if (mac_addr)
		free(mac_addr);
	if (ifname)
		free(ifname);
	return ret;
}
static int
virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	/* make sure the device is stopped and the queues are freed */
	rte_eth_dev_close(eth_dev->data->port_id);

	return 0;
}
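/* Forward DMA map/unmap requests to the backend when it provides an
 * implementation (backends that need explicit DMA mappings, e.g.
 * VFIO-based ones, implement this hook); otherwise they are a no-op.
 */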
static int virtio_user_pmd_dma_map(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;
	struct virtio_hw *hw;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	hw = (struct virtio_hw *)eth_dev->data->dev_private;
	dev = hw->virtio_user_dev;

	if (dev->ops->dma_map)
		return dev->ops->dma_map(dev, addr, iova, len);

	return 0;
}
static int virtio_user_pmd_dma_unmap(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;
	struct virtio_hw *hw;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	hw = (struct virtio_hw *)eth_dev->data->dev_private;
	dev = hw->virtio_user_dev;

	if (dev->ops->dma_unmap)
		return dev->ops->dma_unmap(dev, addr, iova, len);

	return 0;
}
static struct rte_vdev_driver virtio_user_driver = {
	.probe = virtio_user_pmd_probe,
	.remove = virtio_user_pmd_remove,
	.dma_map = virtio_user_pmd_dma_map,
	.dma_unmap = virtio_user_pmd_dma_unmap,
};

RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,