net/virtio-user: add mrg-rxbuf and in-order vdev parameters
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 7a70c18..953c460 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
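
Note on the two new arguments that virtio_user_dev_init() gains at the end of this diff (mrg_rxbuf, in_order): they come in as vdev devargs. As a rough, illustrative sketch only (the real parsing lives in virtio_user_ethdev.c and the helper names below are made up), integer devargs like these are normally pulled out of the device string with librte_kvargs, and the parsed values would then be passed straight through to virtio_user_dev_init(), which uses them to mask the corresponding feature bits below.

    /* Illustrative only: parsing the args portion of a device string such as
     * "path=/tmp/sock0,mrg_rxbuf=0,in_order=0". Only the rte_kvargs calls
     * are real API; the helper names are hypothetical.
     */
    #include <stdint.h>
    #include <stdlib.h>

    #include <rte_common.h>
    #include <rte_kvargs.h>

    static int
    get_integer_arg(const char *key __rte_unused, const char *value,
                    void *extra_args)
    {
            *(uint64_t *)extra_args = strtoull(value, NULL, 0);
            return 0;
    }

    static void
    parse_virtio_user_flags(const char *args, uint64_t *mrg_rxbuf,
                            uint64_t *in_order)
    {
            static const char * const keys[] = { "mrg_rxbuf", "in_order", NULL };
            struct rte_kvargs *kvlist = rte_kvargs_parse(args, keys);

            if (kvlist == NULL)
                    return;
            if (rte_kvargs_count(kvlist, "mrg_rxbuf") == 1)
                    rte_kvargs_process(kvlist, "mrg_rxbuf",
                                       get_integer_arg, mrg_rxbuf);
            if (rte_kvargs_count(kvlist, "in_order") == 1)
                    rte_kvargs_process(kvlist, "in_order",
                                       get_integer_arg, in_order);
            rte_kvargs_free(kvlist);
    }
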
@@ -17,6 +17,8 @@
 #include "virtio_user_dev.h"
 #include "../virtio_ethdev.h"
 
+#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"
+
 static int
 virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
 {
@@ -93,12 +95,31 @@ virtio_user_queue_setup(struct virtio_user_dev *dev,
        return 0;
 }
 
+int
+is_vhost_user_by_type(const char *path)
+{
+       struct stat sb;
+
+       if (stat(path, &sb) == -1)
+               return 0;
+
+       return S_ISSOCK(sb.st_mode);
+}
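
For context, the same stat()/S_ISSOCK test as a tiny standalone program: a vhost-user backend is addressed through a unix socket, while vhost-kernel uses the /dev/vhost-net character device, so S_ISSOCK() distinguishes the two. The paths used in main() are examples only.

    /* Standalone sketch of the check above; example paths only. */
    #include <stdio.h>
    #include <sys/stat.h>

    static int
    looks_like_vhost_user(const char *path)
    {
            struct stat sb;

            if (stat(path, &sb) == -1)      /* e.g. socket not created yet */
                    return 0;
            return S_ISSOCK(sb.st_mode);
    }

    int
    main(void)
    {
            printf("/dev/vhost-net -> %d\n",
                   looks_like_vhost_user("/dev/vhost-net"));
            printf("/tmp/vhost-user.sock -> %d\n",
                   looks_like_vhost_user("/tmp/vhost-user.sock"));
            return 0;
    }
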
+
 int
 virtio_user_start_device(struct virtio_user_dev *dev)
 {
        uint64_t features;
        int ret;
 
+       pthread_mutex_lock(&dev->mutex);
+
+       if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
+               goto error;
+
+       /* Do not check the return value, as this was already done in init,
+        * or the owner was reset in stop.
+        */
+       dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL);
+
        /* Step 0: tell vhost to create queues */
        if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
                goto error;
@@ -129,8 +150,12 @@ virtio_user_start_device(struct virtio_user_dev *dev)
         */
        dev->ops->enable_qp(dev, 0, 1);
 
+       dev->started = true;
+       pthread_mutex_unlock(&dev->mutex);
+
        return 0;
 error:
+       pthread_mutex_unlock(&dev->mutex);
        /* TODO: free resource here or caller to check */
        return -1;
 }
@@ -139,9 +164,18 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
 {
        uint32_t i;
 
+       pthread_mutex_lock(&dev->mutex);
        for (i = 0; i < dev->max_queue_pairs; ++i)
                dev->ops->enable_qp(dev, i, 0);
 
+       if (dev->ops->send_request(dev, VHOST_USER_RESET_OWNER, NULL) < 0) {
+               PMD_DRV_LOG(INFO, "Failed to reset the device");
+               pthread_mutex_unlock(&dev->mutex);
+               return -1;
+       }
+       dev->started = false;
+       pthread_mutex_unlock(&dev->mutex);
+
        return 0;
 }
 
@@ -166,17 +200,6 @@ parse_mac(struct virtio_user_dev *dev, const char *mac)
        }
 }
 
-int
-is_vhost_user_by_type(const char *path)
-{
-       struct stat sb;
-
-       if (stat(path, &sb) == -1)
-               return 0;
-
-       return S_ISSOCK(sb.st_mode);
-}
-
 static int
 virtio_user_dev_init_notify(struct virtio_user_dev *dev)
 {
@@ -243,12 +266,44 @@ virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
        eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
        /* For virtio vdev, no need to read counter for clean */
        eth_dev->intr_handle->efd_counter_size = 0;
+       eth_dev->intr_handle->fd = -1;
        if (dev->vhostfd >= 0)
                eth_dev->intr_handle->fd = dev->vhostfd;
+       else if (dev->is_server)
+               eth_dev->intr_handle->fd = dev->listenfd;
 
        return 0;
 }
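
In server mode there is no connected vhostfd yet, so the interrupt handle falls back to the listening socket: the fd becoming readable means the vhost-user backend is connecting. A minimal sketch of such a listening AF_UNIX socket and the accept step follows; the helper names and error handling are illustrative, not taken from the vhost-user backend code.

    /* Illustrative AF_UNIX server-side setup: create and listen on the
     * socket path, then wait for the vhost-user backend to connect.
     */
    #include <poll.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>

    static int
    open_listen_sock(const char *path)
    {
            struct sockaddr_un un;
            int fd = socket(AF_UNIX, SOCK_STREAM, 0);

            if (fd < 0)
                    return -1;
            memset(&un, 0, sizeof(un));
            un.sun_family = AF_UNIX;
            strncpy(un.sun_path, path, sizeof(un.sun_path) - 1);
            if (bind(fd, (struct sockaddr *)&un, sizeof(un)) < 0 ||
                listen(fd, 1) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;
    }

    static int
    wait_for_backend(int listenfd, int timeout_ms)
    {
            struct pollfd pfd = { .fd = listenfd, .events = POLLIN };

            /* A readable listen fd is what the interrupt thread reacts to. */
            if (poll(&pfd, 1, timeout_ms) <= 0)
                    return -1;
            return accept(listenfd, NULL, NULL);    /* becomes the vhostfd */
    }
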
 
+static void
+virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
+                                                const void *addr __rte_unused,
+                                                size_t len __rte_unused,
+                                                void *arg)
+{
+       struct virtio_user_dev *dev = arg;
+       uint16_t i;
+
+       pthread_mutex_lock(&dev->mutex);
+
+       if (dev->started == false)
+               goto exit;
+
+       /* Step 1: pause the active queues */
+       for (i = 0; i < dev->queue_pairs; i++)
+               dev->ops->enable_qp(dev, i, 0);
+
+       /* Step 2: update memory regions */
+       dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
+
+       /* Step 3: resume the active queues */
+       for (i = 0; i < dev->queue_pairs; i++)
+               dev->ops->enable_qp(dev, i, 1);
+
+exit:
+       pthread_mutex_unlock(&dev->mutex);
+}
+
 static int
 virtio_user_dev_setup(struct virtio_user_dev *dev)
 {
@@ -258,21 +313,32 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
        dev->vhostfds = NULL;
        dev->tapfds = NULL;
 
-       if (is_vhost_user_by_type(dev->path)) {
-               dev->ops = &ops_user;
-       } else {
-               dev->ops = &ops_kernel;
-
-               dev->vhostfds = malloc(dev->max_queue_pairs * sizeof(int));
-               dev->tapfds = malloc(dev->max_queue_pairs * sizeof(int));
-               if (!dev->vhostfds || !dev->tapfds) {
-                       PMD_INIT_LOG(ERR, "Failed to malloc");
+       if (dev->is_server) {
+               if (access(dev->path, F_OK) == 0 &&
+                   !is_vhost_user_by_type(dev->path)) {
+                       PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!");
                        return -1;
                }
-
-               for (q = 0; q < dev->max_queue_pairs; ++q) {
-                       dev->vhostfds[q] = -1;
-                       dev->tapfds[q] = -1;
+               dev->ops = &ops_user;
+       } else {
+               if (is_vhost_user_by_type(dev->path)) {
+                       dev->ops = &ops_user;
+               } else {
+                       dev->ops = &ops_kernel;
+
+                       dev->vhostfds = malloc(dev->max_queue_pairs *
+                                              sizeof(int));
+                       dev->tapfds = malloc(dev->max_queue_pairs *
+                                            sizeof(int));
+                       if (!dev->vhostfds || !dev->tapfds) {
+                               PMD_INIT_LOG(ERR, "Failed to malloc");
+                               return -1;
+                       }
+
+                       for (q = 0; q < dev->max_queue_pairs; ++q) {
+                               dev->vhostfds[q] = -1;
+                               dev->tapfds[q] = -1;
+                       }
                }
        }
 
@@ -309,13 +375,17 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
 
 int
 virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
-                    int cq, int queue_size, const char *mac, char **ifname)
+                    int cq, int queue_size, const char *mac, char **ifname,
+                    int mrg_rxbuf, int in_order)
 {
+       pthread_mutex_init(&dev->mutex, NULL);
        snprintf(dev->path, PATH_MAX, "%s", path);
+       dev->started = 0;
        dev->max_queue_pairs = queues;
        dev->queue_pairs = 1; /* mq disabled by default */
        dev->queue_size = queue_size;
        dev->mac_specified = 0;
+       dev->unsupported_features = 0;
        parse_mac(dev, mac);
 
        if (*ifname) {
@@ -327,18 +397,46 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
                PMD_INIT_LOG(ERR, "backend set up fails");
                return -1;
        }
-       if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL) < 0) {
-               PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno));
-               return -1;
+
+       if (!dev->is_server) {
+               if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
+                                          NULL) < 0) {
+                       PMD_INIT_LOG(ERR, "set_owner fails: %s",
+                                    strerror(errno));
+                       return -1;
+               }
+
+               if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
+                                          &dev->device_features) < 0) {
+                       PMD_INIT_LOG(ERR, "get_features failed: %s",
+                                    strerror(errno));
+                       return -1;
+               }
+       } else {
+               /* We just pretend that vhost-user supports all of these
+                * features. Note that this can be a problem if a feature
+                * negotiated here turns out not to be supported by the
+                * vhost-user backend that connects later.
+                */
+               dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
        }
 
-       if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
-                           &dev->device_features) < 0) {
-               PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno));
-               return -1;
+       if (!mrg_rxbuf) {
+               dev->device_features &= ~(1ull << VIRTIO_NET_F_MRG_RXBUF);
+               dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);
        }
-       if (dev->mac_specified)
+
+       if (!in_order) {
+               dev->device_features &= ~(1ull << VIRTIO_F_IN_ORDER);
+               dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);
+       }
+
+       if (dev->mac_specified) {
                dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
+       } else {
+               dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC);
+               dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);
+       }
 
        if (cq) {
                /* device does not really need to know anything about CQ,
@@ -353,6 +451,14 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
                dev->device_features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
                dev->device_features &= ~(1ull << VIRTIO_NET_F_MQ);
                dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
+               dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
+               dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
+               dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
+               dev->unsupported_features |=
+                       (1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
+               dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
+               dev->unsupported_features |=
+                       (1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
        }
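
Every disabled feature above follows the same two-line pattern: clear the bit in device_features and record it in unsupported_features, presumably so it can be masked out again once the real backend features are known (e.g. after a server-mode connection is established). A hypothetical helper, shown only to make the idiom explicit:

    /* Hypothetical helper (not part of this patch) capturing the recurring
     * pattern above: stop offering a feature and remember that it is
     * unsupported.
     */
    static inline void
    virtio_user_dev_del_feature(struct virtio_user_dev *dev, unsigned int bit)
    {
            dev->device_features &= ~(1ull << bit);
            dev->unsupported_features |= (1ull << bit);
    }

    /* e.g.:
     *      if (!mrg_rxbuf)
     *              virtio_user_dev_del_feature(dev, VIRTIO_NET_F_MRG_RXBUF);
     *      if (!in_order)
     *              virtio_user_dev_del_feature(dev, VIRTIO_F_IN_ORDER);
     */
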
 
        /* The backend will not report this feature, we add it explicitly */
@@ -360,6 +466,16 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
                dev->device_features |= (1ull << VIRTIO_NET_F_STATUS);
 
        dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
+       dev->unsupported_features |= ~VIRTIO_USER_SUPPORTED_FEATURES;
+
+       if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
+                               virtio_user_mem_event_cb, dev)) {
+               if (rte_errno != ENOTSUP) {
+                       PMD_INIT_LOG(ERR,
+                                    "Failed to register mem event callback");
+                       return -1;
+               }
+       }
 
        return 0;
 }
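
For reference, the EAL memory-event hook registered above can be exercised on its own. The sketch below only logs hugepage ranges as they are hot-plugged or released; the callback name and program structure are illustrative.

    /* Self-contained sketch of the EAL memory-event hook used above. */
    #include <errno.h>
    #include <stdio.h>

    #include <rte_common.h>
    #include <rte_eal.h>
    #include <rte_errno.h>
    #include <rte_memory.h>

    static void
    demo_mem_event_cb(enum rte_mem_event type, const void *addr, size_t len,
                      void *arg __rte_unused)
    {
            printf("mem %s: %p, len %zu\n",
                   type == RTE_MEM_EVENT_ALLOC ? "alloc" : "free", addr, len);
    }

    int
    main(int argc, char **argv)
    {
            if (rte_eal_init(argc, argv) < 0)
                    return -1;
            /* As in the hunk above, ENOTSUP (legacy memory mode) is not fatal. */
            if (rte_mem_event_callback_register("demo_mem_event_clb",
                                                demo_mem_event_cb, NULL) < 0 &&
                rte_errno != ENOTSUP)
                    return -1;
            /* ... allocating/freeing hugepage memory now triggers the callback ... */
            return 0;
    }
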
@@ -371,6 +487,8 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
 
        virtio_user_stop_device(dev);
 
+       rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);
+
        for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
                close(dev->callfds[i]);
                close(dev->kickfds[i]);
@@ -378,6 +496,11 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
 
        close(dev->vhostfd);
 
+       if (dev->is_server && dev->listenfd >= 0) {
+               close(dev->listenfd);
+               dev->listenfd = -1;
+       }
+
        if (dev->vhostfds) {
                for (i = 0; i < dev->max_queue_pairs; ++i)
                        close(dev->vhostfds[i]);
@@ -386,9 +509,12 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
        }
 
        free(dev->ifname);
+
+       if (dev->is_server)
+               unlink(dev->path);
 }
 
-static uint8_t
+uint8_t
 virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
 {
        uint16_t i;
@@ -400,11 +526,17 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
                return -1;
        }
 
-       for (i = 0; i < q_pairs; ++i)
-               ret |= dev->ops->enable_qp(dev, i, 1);
-       for (i = q_pairs; i < dev->max_queue_pairs; ++i)
-               ret |= dev->ops->enable_qp(dev, i, 0);
-
+       /* Server mode can't enable the queue pairs while vhostfd is
+        * invalid (no backend connected yet); always return 0 in this
+        * case.
+        */
+       if (dev->vhostfd >= 0) {
+               for (i = 0; i < q_pairs; ++i)
+                       ret |= dev->ops->enable_qp(dev, i, 1);
+               for (i = q_pairs; i < dev->max_queue_pairs; ++i)
+                       ret |= dev->ops->enable_qp(dev, i, 0);
+       } else if (!dev->is_server) {
+               ret = ~0;
+       }
        dev->queue_pairs = q_pairs;
 
        return ret;