net/bnxt: refactor multi-queue Rx configuration
[dpdk.git] / drivers/net/virtio/virtio_user/virtio_user_dev.c
index 95204ea..364f43e 100644
@@ -13,6 +13,7 @@
 #include <sys/types.h>
 #include <sys/stat.h>
 
+#include <rte_alarm.h>
 #include <rte_string_fns.h>
 #include <rte_eal_memconfig.h>
 
@@ -144,10 +145,6 @@ virtio_user_dev_set_features(struct virtio_user_dev *dev)
 
        pthread_mutex_lock(&dev->mutex);
 
-       if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
-                       dev->vhostfd < 0)
-               goto error;
-
        /* Step 0: tell vhost to create queues */
        if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
                goto error;
@@ -190,11 +187,6 @@ virtio_user_start_device(struct virtio_user_dev *dev)
        rte_mcfg_mem_read_lock();
        pthread_mutex_lock(&dev->mutex);
 
-       /* Vhost-user client not connected yet, will start later */
-       if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
-                       dev->vhostfd < 0)
-               goto out;
-
        /* Step 2: share memory regions */
        ret = dev->ops->set_memory_table(dev);
        if (ret < 0)
@@ -213,7 +205,7 @@ virtio_user_start_device(struct virtio_user_dev *dev)
                goto error;
 
        dev->started = true;
-out:
+
        pthread_mutex_unlock(&dev->mutex);
        rte_mcfg_mem_read_unlock();
 
@@ -292,13 +284,7 @@ virtio_user_dev_init_notify(struct virtio_user_dev *dev)
        int callfd;
        int kickfd;
 
-       for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
-               if (i >= dev->max_queue_pairs * 2) {
-                       dev->kickfds[i] = -1;
-                       dev->callfds[i] = -1;
-                       continue;
-               }
-
+       for (i = 0; i < dev->max_queue_pairs * 2; i++) {
                /* May use invalid flag, but some backend uses kickfd and
                 * callfd as criteria to judge if dev is alive. so finally we
                 * use real event_fd.
@@ -306,35 +292,56 @@ virtio_user_dev_init_notify(struct virtio_user_dev *dev)
                callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
                if (callfd < 0) {
                        PMD_DRV_LOG(ERR, "(%s) callfd error, %s", dev->path, strerror(errno));
-                       break;
+                       goto err;
                }
                kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
                if (kickfd < 0) {
                        close(callfd);
                        PMD_DRV_LOG(ERR, "(%s) kickfd error, %s", dev->path, strerror(errno));
-                       break;
+                       goto err;
                }
                dev->callfds[i] = callfd;
                dev->kickfds[i] = kickfd;
        }
 
-       if (i < VIRTIO_MAX_VIRTQUEUES) {
-               for (j = 0; j < i; ++j) {
-                       close(dev->callfds[j]);
+       return 0;
+err:
+       for (j = 0; j < i; j++) {
+               if (dev->kickfds[j] >= 0) {
                        close(dev->kickfds[j]);
+                       dev->kickfds[j] = -1;
+               }
+               if (dev->callfds[j] >= 0) {
+                       close(dev->callfds[j]);
+                       dev->callfds[j] = -1;
                }
-
-               return -1;
        }
 
-       return 0;
+       return -1;
+}
+
+static void
+virtio_user_dev_uninit_notify(struct virtio_user_dev *dev)
+{
+       uint32_t i;
+
+       for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
+               if (dev->kickfds[i] >= 0) {
+                       close(dev->kickfds[i]);
+                       dev->kickfds[i] = -1;
+               }
+               if (dev->callfds[i] >= 0) {
+                       close(dev->callfds[i]);
+                       dev->callfds[i] = -1;
+               }
+       }
 }
 
 static int
 virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
 {
        uint32_t i;
-       struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+       struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
 
        if (!eth_dev->intr_handle) {
                eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
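The callfds/kickfds managed above are plain Linux eventfds: the virtio-user frontend writes to a kickfd to notify the backend of newly available buffers, and the backend writes to the matching callfd to raise a used-buffer interrupt. A self-contained illustration of that signalling primitive (example code, not part of the driver):

/* Standalone illustration of eventfd signalling as used for kickfd
 * (frontend -> backend) and callfd (backend -> frontend).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	/* Same flags as above: non-blocking, closed on exec. */
	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
	if (efd < 0)
		return 1;

	/* Notifier side: writing adds to the eventfd's 64-bit counter. */
	uint64_t kick = 1;
	if (write(efd, &kick, sizeof(kick)) != (ssize_t)sizeof(kick))
		perror("write");

	/* Consumer side: reading returns and clears the counter. */
	uint64_t n;
	if (read(efd, &n, sizeof(n)) == (ssize_t)sizeof(n))
		printf("got %" PRIu64 " notification(s)\n", n);

	close(efd);
	return 0;
}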
@@ -352,11 +359,7 @@ virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
        eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
        /* For virtio vdev, no need to read counter for clean */
        eth_dev->intr_handle->efd_counter_size = 0;
-       eth_dev->intr_handle->fd = -1;
-       if (dev->vhostfd >= 0)
-               eth_dev->intr_handle->fd = dev->vhostfd;
-       else if (dev->is_server)
-               eth_dev->intr_handle->fd = dev->listenfd;
+       eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);
 
        return 0;
 }
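The new dev->ops->get_intr_fd() callback replaces the open-coded vhostfd/listenfd selection removed above; each backend now decides which descriptor the ethdev interrupt handle should poll. A sketch of what the vhost-user variant would look like, mirroring the removed logic (function and field names are illustrative; in the refactored driver these descriptors live in backend-private data rather than in struct virtio_user_dev):

/* Illustrative only: mirrors the logic removed above. */
static int
vhost_user_get_intr_fd_sketch(struct virtio_user_dev *dev)
{
	if (dev->vhostfd >= 0)
		return dev->vhostfd;
	if (dev->is_server)
		return dev->listenfd;
	return -1;
}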
@@ -411,45 +414,26 @@ exit:
 static int
 virtio_user_dev_setup(struct virtio_user_dev *dev)
 {
-       uint32_t q;
-
-       dev->vhostfd = -1;
-       dev->vhostfds = NULL;
-       dev->tapfds = NULL;
-
        if (dev->is_server) {
                if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
                        PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
                        return -1;
                }
+       }
+
+       switch (dev->backend_type) {
+       case VIRTIO_USER_BACKEND_VHOST_USER:
                dev->ops = &virtio_ops_user;
-       } else {
-               if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
-                       dev->ops = &virtio_ops_user;
-               } else if (dev->backend_type ==
-                                       VIRTIO_USER_BACKEND_VHOST_KERNEL) {
-                       dev->ops = &virtio_ops_kernel;
-
-                       dev->vhostfds = malloc(dev->max_queue_pairs *
-                                              sizeof(int));
-                       dev->tapfds = malloc(dev->max_queue_pairs *
-                                            sizeof(int));
-                       if (!dev->vhostfds || !dev->tapfds) {
-                               PMD_INIT_LOG(ERR, "(%s) Failed to allocate FDs", dev->path);
-                               return -1;
-                       }
-
-                       for (q = 0; q < dev->max_queue_pairs; ++q) {
-                               dev->vhostfds[q] = -1;
-                               dev->tapfds[q] = -1;
-                       }
-               } else if (dev->backend_type ==
-                               VIRTIO_USER_BACKEND_VHOST_VDPA) {
-                       dev->ops = &virtio_ops_vdpa;
-               } else {
-                       PMD_DRV_LOG(ERR, "(%s) Unknown backend type", dev->path);
-                       return -1;
-               }
+               break;
+       case VIRTIO_USER_BACKEND_VHOST_KERNEL:
+               dev->ops = &virtio_ops_kernel;
+               break;
+       case VIRTIO_USER_BACKEND_VHOST_VDPA:
+               dev->ops = &virtio_ops_vdpa;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "(%s) Unknown backend type", dev->path);
+               return -1;
        }
 
        if (dev->ops->setup(dev) < 0) {
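The switch above only selects a per-backend ops table; everything backend-specific in this file is then reached through function pointers. A reduced sketch of that table, limited to the callbacks referenced in this diff (member list and exact signatures are illustrative; the real struct virtio_user_backend_ops is declared in the backend header and has further members):

/* Reduced, illustrative sketch of the backend ops table. */
struct virtio_user_backend_ops {
	int (*setup)(struct virtio_user_dev *dev);
	void (*destroy)(struct virtio_user_dev *dev);
	int (*set_owner)(struct virtio_user_dev *dev);
	int (*get_backend_features)(uint64_t *features);
	int (*get_features)(struct virtio_user_dev *dev, uint64_t *features);
	int (*set_memory_table)(struct virtio_user_dev *dev);
	int (*enable_qp)(struct virtio_user_dev *dev, uint16_t pair_idx, int enable);
	int (*get_intr_fd)(struct virtio_user_dev *dev);
	int (*update_link_state)(struct virtio_user_dev *dev);
	int (*server_disconnect)(struct virtio_user_dev *dev);
	int (*server_reconnect)(struct virtio_user_dev *dev);
	/* ... further callbacks (status handling, vring setup, ...) omitted */
};

/* One instance per backend type, as selected by the switch above. */
extern struct virtio_user_backend_ops virtio_ops_user;   /* vhost-user   */
extern struct virtio_user_backend_ops virtio_ops_kernel; /* vhost-kernel */
extern struct virtio_user_backend_ops virtio_ops_vdpa;   /* vhost-vdpa   */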
@@ -459,15 +443,22 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
 
        if (virtio_user_dev_init_notify(dev) < 0) {
                PMD_INIT_LOG(ERR, "(%s) Failed to init notifiers\n", dev->path);
-               return -1;
+               goto destroy;
        }
 
        if (virtio_user_fill_intr_handle(dev) < 0) {
                PMD_INIT_LOG(ERR, "(%s) Failed to init interrupt handler\n", dev->path);
-               return -1;
+               goto uninit;
        }
 
        return 0;
+
+uninit:
+       virtio_user_dev_uninit_notify(dev);
+destroy:
+       dev->ops->destroy(dev);
+
+       return -1;
 }
 
 /* Use below macro to filter features from vhost backend */
@@ -489,27 +480,25 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
         1ULL << VIRTIO_NET_F_GUEST_TSO6        |       \
         1ULL << VIRTIO_F_IN_ORDER              |       \
         1ULL << VIRTIO_F_VERSION_1             |       \
-        1ULL << VIRTIO_F_RING_PACKED           |       \
-        1ULL << VHOST_USER_F_PROTOCOL_FEATURES)
-
-#define VHOST_USER_SUPPORTED_PROTOCOL_FEATURES         \
-       (1ULL << VHOST_USER_PROTOCOL_F_MQ |             \
-        1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |      \
-        1ULL << VHOST_USER_PROTOCOL_F_STATUS)
+        1ULL << VIRTIO_F_RING_PACKED)
 
-#define VHOST_VDPA_SUPPORTED_PROTOCOL_FEATURES         \
-       (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2   |       \
-       1ULL << VHOST_BACKEND_F_IOTLB_BATCH)
 int
 virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
                     int cq, int queue_size, const char *mac, char **ifname,
                     int server, int mrg_rxbuf, int in_order, int packed_vq,
                     enum virtio_user_backend_type backend_type)
 {
-       uint64_t protocol_features = 0;
+       uint64_t backend_features;
+       int i;
 
        pthread_mutex_init(&dev->mutex, NULL);
        strlcpy(dev->path, path, PATH_MAX);
+
+       for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; i++) {
+               dev->kickfds[i] = -1;
+               dev->callfds[i] = -1;
+       }
+
        dev->started = 0;
        dev->max_queue_pairs = queues;
        dev->queue_pairs = 1; /* mq disabled by default */
@@ -517,14 +506,9 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
        dev->is_server = server;
        dev->mac_specified = 0;
        dev->frontend_features = 0;
-       dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
+       dev->unsupported_features = 0;
        dev->backend_type = backend_type;
 
-       if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
-               dev->protocol_features = VHOST_USER_SUPPORTED_PROTOCOL_FEATURES;
-       else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
-               dev->protocol_features = VHOST_VDPA_SUPPORTED_PROTOCOL_FEATURES;
-
        parse_mac(dev, mac);
 
        if (*ifname) {
@@ -537,57 +521,22 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
                return -1;
        }
 
-       if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER)
-               dev->unsupported_features |=
-                       (1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
-
-       if (!dev->is_server) {
-               if (dev->ops->set_owner(dev) < 0) {
-                       PMD_INIT_LOG(ERR, "(%s) Failed to set backend owner", dev->path);
-                       return -1;
-               }
-
-               if (dev->ops->get_features(dev, &dev->device_features) < 0) {
-                       PMD_INIT_LOG(ERR, "(%s) Failed to get backend features", dev->path);
-                       return -1;
-               }
-
-
-               if ((dev->device_features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
-                               (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)) {
-                       if (dev->ops->get_protocol_features(dev, &protocol_features)) {
-                               PMD_INIT_LOG(ERR, "(%s) Failed to get backend protocol features",
-                                               dev->path);
-                               return -1;
-                       }
-
-                       dev->protocol_features &= protocol_features;
-
-                       if (dev->ops->set_protocol_features(dev, dev->protocol_features)) {
-                               PMD_INIT_LOG(ERR, "(%s) Failed to set backend protocol features",
-                                               dev->path);
-                               return -1;
-                       }
-
-                       if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)))
-                               dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
-               }
-       } else {
-               /* We just pretend vhost-user can support all these features.
-                * Note that this could be problematic that if some feature is
-                * negotiated but not supported by the vhost-user which comes
-                * later.
-                */
-               dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
+       if (dev->ops->set_owner(dev) < 0) {
+               PMD_INIT_LOG(ERR, "(%s) Failed to set backend owner", dev->path);
+               return -1;
+       }
 
-               /* We cannot assume VHOST_USER_PROTOCOL_F_STATUS is supported
-                * until it's negotiated
-                */
-               dev->protocol_features &=
-                       ~(1ULL << VHOST_USER_PROTOCOL_F_STATUS);
+       if (dev->ops->get_backend_features(&backend_features) < 0) {
+               PMD_INIT_LOG(ERR, "(%s) Failed to get backend features", dev->path);
+               return -1;
        }
 
+       dev->unsupported_features = ~(VIRTIO_USER_SUPPORTED_FEATURES | backend_features);
 
+       if (dev->ops->get_features(dev, &dev->device_features) < 0) {
+               PMD_INIT_LOG(ERR, "(%s) Failed to get device features", dev->path);
+               return -1;
+       }
 
        if (!mrg_rxbuf)
                dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);
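With this change the unsupported-feature mask is computed once from the frontend's supported set plus whatever the backend type reports, and the devarg-driven vetoes (mrg_rxbuf, in_order, packed_vq, cq) are OR'ed in afterwards, as shown for mrg_rxbuf just above. A small worked example of the masking arithmetic, with made-up bit positions:

/* Worked example of the feature-mask arithmetic; bit values are invented. */
#include <stdint.h>
#include <stdio.h>

#define EX_F_MRG_RXBUF (1ULL << 15)
#define EX_F_MQ        (1ULL << 22)
#define EX_SUPPORTED   (EX_F_MRG_RXBUF | EX_F_MQ)

int main(void)
{
	uint64_t backend_features = 0;                       /* nothing extra from the backend */
	uint64_t device_features = EX_F_MRG_RXBUF | EX_F_MQ; /* what the device exposes */
	uint64_t unsupported = ~(EX_SUPPORTED | backend_features);

	/* Equivalent of the "!mrg_rxbuf" veto above: drop one feature. */
	unsupported |= EX_F_MRG_RXBUF;

	/* Final offer: device features minus everything vetoed. */
	uint64_t offered = device_features & ~unsupported;
	printf("offered = 0x%llx\n", (unsigned long long)offered); /* only EX_F_MQ left */
	return 0;
}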
@@ -646,39 +595,18 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 void
 virtio_user_dev_uninit(struct virtio_user_dev *dev)
 {
-       uint32_t i;
-
        virtio_user_stop_device(dev);
 
        rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);
 
-       for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
-               close(dev->callfds[i]);
-               close(dev->kickfds[i]);
-       }
-
-       if (dev->vhostfd >= 0)
-               close(dev->vhostfd);
-
-       if (dev->is_server && dev->listenfd >= 0) {
-               close(dev->listenfd);
-               dev->listenfd = -1;
-       }
-
-       if (dev->vhostfds) {
-               for (i = 0; i < dev->max_queue_pairs; ++i) {
-                       close(dev->vhostfds[i]);
-                       if (dev->tapfds[i] >= 0)
-                               close(dev->tapfds[i]);
-               }
-               free(dev->vhostfds);
-               free(dev->tapfds);
-       }
+       virtio_user_dev_uninit_notify(dev);
 
        free(dev->ifname);
 
        if (dev->is_server)
                unlink(dev->path);
+
+       dev->ops->destroy(dev);
 }
 
 uint8_t
@@ -693,15 +621,11 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
                return -1;
        }
 
-       /* Server mode can't enable queue pairs if vhostfd is invalid,
-        * always return 0 in this case.
-        */
-       if (!dev->is_server || dev->vhostfd >= 0) {
-               for (i = 0; i < q_pairs; ++i)
-                       ret |= dev->ops->enable_qp(dev, i, 1);
-               for (i = q_pairs; i < dev->max_queue_pairs; ++i)
-                       ret |= dev->ops->enable_qp(dev, i, 0);
-       }
+       for (i = 0; i < q_pairs; ++i)
+               ret |= dev->ops->enable_qp(dev, i, 1);
+       for (i = q_pairs; i < dev->max_queue_pairs; ++i)
+               ret |= dev->ops->enable_qp(dev, i, 0);
+
        dev->queue_pairs = q_pairs;
 
        return ret;
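virtio_user_handle_mq() now unconditionally asks the backend to enable the first q_pairs pairs and disable the rest; the old vhostfd guard is gone and any connection-state handling is left to the backend ops. For vhost-user, enabling a pair presumably means enabling both of its vrings (RX vring 2*i, TX vring 2*i+1); the sketch below is illustrative and vhost_user_set_vring_enable() is a hypothetical helper name:

/* Hypothetical vhost-user enable_qp: toggle both vrings of one pair. */
static int
vhost_user_enable_queue_pair_sketch(struct virtio_user_dev *dev,
				    uint16_t pair_idx, int enable)
{
	int ret = 0;
	uint32_t i;

	for (i = 0; i < 2; i++) {
		uint32_t vring_idx = pair_idx * 2 + i;

		ret |= vhost_user_set_vring_enable(dev, vring_idx, enable);
	}

	return ret;
}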
@@ -916,3 +840,189 @@ virtio_user_dev_update_status(struct virtio_user_dev *dev)
        pthread_mutex_unlock(&dev->mutex);
        return ret;
 }
+
+int
+virtio_user_dev_update_link_state(struct virtio_user_dev *dev)
+{
+       if (dev->ops->update_link_state)
+               return dev->ops->update_link_state(dev);
+
+       return 0;
+}
+
+static void
+virtio_user_dev_reset_queues_packed(struct rte_eth_dev *eth_dev)
+{
+       struct virtio_user_dev *dev = eth_dev->data->dev_private;
+       struct virtio_hw *hw = &dev->hw;
+       struct virtnet_rx *rxvq;
+       struct virtnet_tx *txvq;
+       uint16_t i;
+
+       /* Add lock to avoid queue contention. */
+       rte_spinlock_lock(&hw->state_lock);
+       hw->started = 0;
+
+       /*
+        * Waiting for datapath to complete before resetting queues.
+        * 1 ms should be enough for the ongoing Tx/Rx function to finish.
+        */
+       rte_delay_ms(1);
+
+       /* Vring reset for each Tx queue and Rx queue. */
+       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+               rxvq = eth_dev->data->rx_queues[i];
+               virtqueue_rxvq_reset_packed(virtnet_rxq_to_vq(rxvq));
+               virtio_dev_rx_queue_setup_finish(eth_dev, i);
+       }
+
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+               txvq = eth_dev->data->tx_queues[i];
+               virtqueue_txvq_reset_packed(virtnet_txq_to_vq(txvq));
+       }
+
+       hw->started = 1;
+       rte_spinlock_unlock(&hw->state_lock);
+}
+
+void
+virtio_user_dev_delayed_disconnect_handler(void *param)
+{
+       struct virtio_user_dev *dev = param;
+       struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
+
+       if (rte_intr_disable(eth_dev->intr_handle) < 0) {
+               PMD_DRV_LOG(ERR, "interrupt disable failed");
+               return;
+       }
+       PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
+                   eth_dev->intr_handle->fd);
+       if (rte_intr_callback_unregister(eth_dev->intr_handle,
+                                        virtio_interrupt_handler,
+                                        eth_dev) != 1)
+               PMD_DRV_LOG(ERR, "interrupt unregister failed");
+
+       if (dev->is_server) {
+               if (dev->ops->server_disconnect)
+                       dev->ops->server_disconnect(dev);
+
+               eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);
+
+               PMD_DRV_LOG(DEBUG, "Registering intr fd: %d",
+                           eth_dev->intr_handle->fd);
+
+               if (rte_intr_callback_register(eth_dev->intr_handle,
+                                              virtio_interrupt_handler,
+                                              eth_dev))
+                       PMD_DRV_LOG(ERR, "interrupt register failed");
+
+               if (rte_intr_enable(eth_dev->intr_handle) < 0) {
+                       PMD_DRV_LOG(ERR, "interrupt enable failed");
+                       return;
+               }
+       }
+}
+
+static void
+virtio_user_dev_delayed_intr_reconfig_handler(void *param)
+{
+       struct virtio_user_dev *dev = param;
+       struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
+
+       PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
+                   eth_dev->intr_handle->fd);
+
+       if (rte_intr_callback_unregister(eth_dev->intr_handle,
+                                        virtio_interrupt_handler,
+                                        eth_dev) != 1)
+               PMD_DRV_LOG(ERR, "interrupt unregister failed");
+
+       eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);
+
+       PMD_DRV_LOG(DEBUG, "Registering intr fd: %d", eth_dev->intr_handle->fd);
+
+       if (rte_intr_callback_register(eth_dev->intr_handle,
+                                      virtio_interrupt_handler, eth_dev))
+               PMD_DRV_LOG(ERR, "interrupt register failed");
+
+       if (rte_intr_enable(eth_dev->intr_handle) < 0)
+               PMD_DRV_LOG(ERR, "interrupt enable failed");
+}
+
+int
+virtio_user_dev_server_reconnect(struct virtio_user_dev *dev)
+{
+       int ret, old_status;
+       struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
+       struct virtio_hw *hw = &dev->hw;
+
+       if (!dev->ops->server_reconnect) {
+               PMD_DRV_LOG(ERR, "(%s) Missing server reconnect callback", dev->path);
+               return -1;
+       }
+
+       if (dev->ops->server_reconnect(dev)) {
+               PMD_DRV_LOG(ERR, "(%s) Reconnect callback call failed", dev->path);
+               return -1;
+       }
+
+       old_status = dev->status;
+
+       virtio_reset(hw);
+
+       virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+
+       virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+
+       if (dev->ops->get_features(dev, &dev->device_features) < 0) {
+               PMD_INIT_LOG(ERR, "get_features failed: %s",
+                            strerror(errno));
+               return -1;
+       }
+
+       dev->device_features |= dev->frontend_features;
+
+       /* unmask vhost-user unsupported features */
+       dev->device_features &= ~(dev->unsupported_features);
+
+       dev->features &= dev->device_features;
+
+       /* For packed ring, resetting queues is required in reconnection. */
+       if (virtio_with_packed_queue(hw) &&
+          (old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
+               PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
+                               " when packed ring reconnecting.");
+               virtio_user_dev_reset_queues_packed(eth_dev);
+       }
+
+       virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
+
+       /* Start the device */
+       virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+       if (!dev->started)
+               return -1;
+
+       if (dev->queue_pairs > 1) {
+               ret = virtio_user_handle_mq(dev, dev->queue_pairs);
+               if (ret != 0) {
+                       PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!");
+                       return -1;
+               }
+       }
+       if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
+               if (rte_intr_disable(eth_dev->intr_handle) < 0) {
+                       PMD_DRV_LOG(ERR, "interrupt disable failed");
+                       return -1;
+               }
+               /*
+                * This function can be called from the interrupt handler, so
+                * we can't unregister interrupt handler here.  Setting
+                * alarm to do that later.
+                */
+               rte_eal_alarm_set(1,
+                       virtio_user_dev_delayed_intr_reconfig_handler,
+                       (void *)dev);
+       }
+       PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
+       return 0;
+}
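The 1 us alarm at the end of the reconnect path works around the restriction spelled out in the comment: the interrupt callback cannot unregister itself, so interrupt re-configuration is deferred to the EAL alarm thread. The delayed disconnect handler added above is presumably scheduled the same way from the ethdev interrupt path; a usage sketch under that assumption (not the actual virtio_user_ethdev.c code):

/* Sketch: defer interrupt re-configuration out of interrupt context.
 * A one-shot 1 us alarm runs the handler on the EAL alarm thread, where
 * unregistering the currently running interrupt callback is allowed.
 */
#include <rte_alarm.h>

static void
example_schedule_disconnect(struct virtio_user_dev *dev)
{
	if (rte_eal_alarm_set(1, virtio_user_dev_delayed_disconnect_handler,
			      (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "failed to schedule delayed disconnect");
}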