net/virtio-user: fix not properly reset device
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index c4ba437..a38398b 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
 #include "../virtio_ethdev.h"
 
 static int
-virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
 {
-       int callfd, kickfd;
+       /* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_CALL
+        * comes first, because vhost depends on this message to allocate the
+        * virtqueue pair.
+        */
+       int callfd;
        struct vhost_vring_file file;
-       struct vhost_vring_state state;
-       struct vring *vring = &dev->vrings[queue_sel];
-       struct vhost_vring_addr addr = {
-               .index = queue_sel,
-               .desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
-               .avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
-               .used_user_addr = (uint64_t)(uintptr_t)vring->used,
-               .log_guest_addr = 0,
-               .flags = 0, /* disable log */
-       };
 
        /* May use invalid flag, but some backend leverages kickfd and callfd as
         * criteria to judge if dev is alive. so finally we use real event_fd.
         */
-       callfd = eventfd(0, O_CLOEXEC | O_NONBLOCK);
+       callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
        if (callfd < 0) {
                PMD_DRV_LOG(ERR, "callfd error, %s\n", strerror(errno));
                return -1;
        }
-       kickfd = eventfd(0, O_CLOEXEC | O_NONBLOCK);
-       if (kickfd < 0) {
-               close(callfd);
-               PMD_DRV_LOG(ERR, "kickfd error, %s\n", strerror(errno));
-               return -1;
-       }
-
-       /* Of all per virtqueue MSGs, make sure VHOST_SET_VRING_CALL come
-        * firstly because vhost depends on this msg to allocate virtqueue
-        * pair.
-        */
        file.index = queue_sel;
        file.fd = callfd;
        vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_CALL, &file);
        dev->callfds[queue_sel] = callfd;
 
+       return 0;
+}
+
+static int
+virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+       int kickfd;
+       struct vhost_vring_file file;
+       struct vhost_vring_state state;
+       struct vring *vring = &dev->vrings[queue_sel];
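+       /* The vring lives in this process, so its desc, avail and used areas
+        * are passed as user-space addresses; dirty logging stays disabled.
+        */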
+       struct vhost_vring_addr addr = {
+               .index = queue_sel,
+               .desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
+               .avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
+               .used_user_addr = (uint64_t)(uintptr_t)vring->used,
+               .log_guest_addr = 0,
+               .flags = 0, /* disable log */
+       };
+
        state.index = queue_sel;
        state.num = vring->num;
        vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_NUM, &state);
@@ -97,6 +99,12 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
         * lastly because vhost depends on this msg to judge if
         * virtio is ready.
         */
+       kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+       if (kickfd < 0) {
+               PMD_DRV_LOG(ERR, "kickfd error, %s\n", strerror(errno));
+               return -1;
+       }
+       file.index = queue_sel;
        file.fd = kickfd;
        vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_KICK, &file);
        dev->kickfds[queue_sel] = kickfd;
@@ -104,44 +112,68 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
        return 0;
 }
 
-int
-virtio_user_start_device(struct virtio_user_dev *dev)
+static int
+virtio_user_queue_setup(struct virtio_user_dev *dev,
+                       int (*fn)(struct virtio_user_dev *, uint32_t))
 {
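+       /* Apply fn() to every RX vq first, then to every TX vq */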
-       uint64_t features;
        uint32_t i, queue_sel;
-       int ret;
-
-       /* construct memory region inside each implementation */
-       ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_MEM_TABLE, NULL);
-       if (ret < 0)
-               goto error;
 
        for (i = 0; i < dev->max_queue_pairs; ++i) {
                queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
-               if (virtio_user_kick_queue(dev, queue_sel) < 0) {
-                       PMD_DRV_LOG(INFO, "kick rx vq fails: %u", i);
-                       goto error;
+               if (fn(dev, queue_sel) < 0) {
+                       PMD_DRV_LOG(INFO, "setup rx vq fails: %u", i);
+                       return -1;
                }
        }
        for (i = 0; i < dev->max_queue_pairs; ++i) {
                queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
-               if (virtio_user_kick_queue(dev, queue_sel) < 0) {
-                       PMD_DRV_LOG(INFO, "kick tx vq fails: %u", i);
-                       goto error;
+               if (fn(dev, queue_sel) < 0) {
+                       PMD_DRV_LOG(INFO, "setup tx vq fails: %u", i);
+                       return -1;
                }
        }
 
-       /* After setup all virtqueues, we need to set_features so that
-        * these features can be set into each virtqueue in vhost side.
-        * And before that, make sure VIRTIO_NET_F_MAC is stripped.
+       return 0;
+}
+
+int
+virtio_user_start_device(struct virtio_user_dev *dev)
+{
+       uint64_t features;
+       int ret;
+
+       /* Step 0: tell vhost to create queues */
+       if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
+               goto error;
+
+       /* Step 1: set features
+        * Make sure VHOST_USER_F_PROTOCOL_FEATURES is added if mq is enabled,
+        * and that VIRTIO_NET_F_MAC and VIRTIO_NET_F_CTRL_VQ are stripped.
+        */
        features = dev->features;
+       if (dev->max_queue_pairs > 1)
+               features |= VHOST_USER_MQ;
        features &= ~(1ull << VIRTIO_NET_F_MAC);
+       features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
        ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_FEATURES, &features);
        if (ret < 0)
                goto error;
        PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
 
+       /* Step 2: share memory regions */
+       ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_MEM_TABLE, NULL);
+       if (ret < 0)
+               goto error;
+
+       /* Step 3: kick queues */
+       if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
+               goto error;
+
+       /* Step 4: enable queues
+        * We enable the 1st queue pair by default.
+        */
+       vhost_user_enable_queue_pair(dev->vhostfd, 0, 1);
+
        return 0;
 error:
        /* TODO: free resource here or caller to check */
@@ -150,7 +182,17 @@ error:
 
 int virtio_user_stop_device(struct virtio_user_dev *dev)
 {
-       return vhost_user_sock(dev->vhostfd, VHOST_USER_RESET_OWNER, NULL);
+       uint32_t i;
+
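+       /* Close all per-queue eventfds; some backends use the kickfd/callfd
+        * state to judge whether the device is still alive.
+        */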
+       for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
+               close(dev->callfds[i]);
+               close(dev->kickfds[i]);
+       }
+
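+       /* Disable every queue pair instead of sending VHOST_USER_RESET_OWNER */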
+       for (i = 0; i < dev->max_queue_pairs; ++i)
+               vhost_user_enable_queue_pair(dev->vhostfd, i, 0);
+
+       return 0;
 }
 
 static inline void
@@ -178,15 +220,20 @@ int
 virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
                     int cq, int queue_size, const char *mac)
 {
-       strncpy(dev->path, path, PATH_MAX);
+       uint32_t i;
+
+       snprintf(dev->path, PATH_MAX, "%s", path);
        dev->max_queue_pairs = queues;
        dev->queue_pairs = 1; /* mq disabled by default */
        dev->queue_size = queue_size;
        dev->mac_specified = 0;
        parse_mac(dev, mac);
        dev->vhostfd = -1;
-       /* TODO: cq */
-       RTE_SET_USED(cq);
+
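+       /* Mark all per-queue eventfds as not yet created */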
+       for (i = 0; i < VIRTIO_MAX_VIRTQUEUES * 2 + 1; ++i) {
+               dev->kickfds[i] = -1;
+               dev->callfds[i] = -1;
+       }
 
        dev->vhostfd = vhost_user_setup(dev->path);
        if (dev->vhostfd < 0) {
@@ -199,15 +246,34 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
        }
 
        if (vhost_user_sock(dev->vhostfd, VHOST_USER_GET_FEATURES,
-                           &dev->features) < 0) {
+                           &dev->device_features) < 0) {
                PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno));
                return -1;
        }
        if (dev->mac_specified)
-               dev->features |= (1ull << VIRTIO_NET_F_MAC);
-       /* disable it until we support CQ */
-       dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
-       dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_RX);
+               dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
+
+       if (cq) {
+               /* device does not really need to know anything about CQ,
+                * so if necessary, we just claim to support CQ
+                */
+               dev->device_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
+       } else {
+               dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
+               /* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
+               dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_RX);
+               dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
+               dev->device_features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
+               dev->device_features &= ~(1ull << VIRTIO_NET_F_MQ);
+               dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
+       }
+
+       if (dev->max_queue_pairs > 1) {
+               if (!(dev->device_features & VHOST_USER_MQ)) {
+                       PMD_INIT_LOG(ERR, "MQ not supported by the backend");
+                       return -1;
+               }
+       }
 
        return 0;
 }
@@ -215,12 +281,89 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 void
 virtio_user_dev_uninit(struct virtio_user_dev *dev)
 {
-       uint32_t i;
+       close(dev->vhostfd);
+}
 
-       for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
-               close(dev->callfds[i]);
-               close(dev->kickfds[i]);
+static uint8_t
+virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
+{
+       uint16_t i;
+       uint8_t ret = 0;
+
+       if (q_pairs > dev->max_queue_pairs) {
+               PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
+                            q_pairs, dev->max_queue_pairs);
+               return -1;
        }
 
-       close(dev->vhostfd);
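+       /* Enable the first q_pairs queue pairs and disable the rest */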
+       for (i = 0; i < q_pairs; ++i)
+               ret |= vhost_user_enable_queue_pair(dev->vhostfd, i, 1);
+       for (i = q_pairs; i < dev->max_queue_pairs; ++i)
+               ret |= vhost_user_enable_queue_pair(dev->vhostfd, i, 0);
+
+       dev->queue_pairs = q_pairs;
+
+       return ret;
+}
+
+static uint32_t
+virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
+                           uint16_t idx_hdr)
+{
+       struct virtio_net_ctrl_hdr *hdr;
+       virtio_net_ctrl_ack status = ~0;
+       uint16_t i, idx_data, idx_status;
+       uint32_t n_descs = 0;
+
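+       /* A control command is one header descriptor, zero or more data
+        * descriptors, and a final status descriptor the ack is written to.
+        */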
+       /* locate desc for header, data, and status */
+       idx_data = vring->desc[idx_hdr].next;
+       n_descs++;
+
+       i = idx_data;
+       while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
+               i = vring->desc[i].next;
+               n_descs++;
+       }
+
+       /* locate desc for status */
+       idx_status = i;
+       n_descs++;
+
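+       /* Only the MQ vq-pairs-set command is recognized here; any other
+        * command keeps the failure status set above.
+        */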
+       hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
+       if (hdr->class == VIRTIO_NET_CTRL_MQ &&
+           hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+               uint16_t queues;
+
+               queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
+               status = virtio_user_handle_mq(dev, queues);
+       }
+
+       /* Update status */
+       *(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;
+
+       return n_descs;
+}
+
+void
+virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
+{
+       uint16_t avail_idx, desc_idx;
+       struct vring_used_elem *uep;
+       uint32_t n_descs;
+       struct vring *vring = &dev->vrings[queue_idx];
+
+       /* Consume the avail ring; used->idx tracks the next entry to handle */
+       while (vring->used->idx != vring->avail->idx) {
+               avail_idx = (vring->used->idx) & (vring->num - 1);
+               desc_idx = vring->avail->ring[avail_idx];
+
+               n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);
+
+               /* Update used ring */
+               uep = &vring->used->ring[avail_idx];
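+               /* id must be the head of the descriptor chain just consumed */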
+               uep->id = desc_idx;
+               uep->len = n_descs;
+
+               vring->used->idx++;
+       }
 }